import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel

from lightgbm import LGBMClassifier
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier,plot_importance
from sklearn.metrics import precision_score,recall_score,f1_score,brier_score_loss,log_loss,roc_auc_score
from category_encoders import WOEEncoder ,OneHotEncoder,CatBoostEncoder,TargetEncoder
from  lightgbm import LGBMRegressor
from pandas_profiling import ProfileReport
import pandas_profiling
import matplotlib.pyplot as plt
import seaborn as sns

import time
import warnings

from  sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import mutual_info_regression  as mir
from sklearn.preprocessing import StandardScaler

from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot  as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression as LR
from sklearn.tree import DecisionTreeClassifier
import math
import pandas as pd
import numpy as np
warnings.filterwarnings('ignore')


# pandas-profiling usage
def profiling(train, test):
    """Build pandas-profiling EDA reports for the train set (overall and
    split by the 'isDefault' label) and for the test set.

    The ``to_file`` exports are left commented out so HTML generation can
    be re-enabled on demand.
    """
    negatives = train[train['isDefault'] == 0]
    positives = train[train['isDefault'] == 1]

    report_pos = pandas_profiling.ProfileReport(positives)
    # report_pos.to_file("./train_y1.html")

    report_neg = pandas_profiling.ProfileReport(negatives)
    # report_neg.to_file("./train_y0.html")

    report_all = pandas_profiling.ProfileReport(train)
    # report_all.to_file("./train.html")

    report_test = pandas_profiling.ProfileReport(test)
    # report_test.to_file("./test.html")

# Feature engineering
def Feature_processing(x):
    """Feature engineering for the loan-default dataset.

    Groups the columns into numerical / nominal / ordinal sets, integer-
    encodes the ordered categoricals, then separates the labelled rows
    (train) from the unlabelled ones (test) and carves a validation split
    off the training pool.

    Parameters
    ----------
    x : pandas.DataFrame
        Concatenation of the train and test tables; test rows are those
        whose 'isDefault' is NaN.

    Returns
    -------
    tuple
        (train_x, train_y, vel_y, vel_x, test) — training features and
        labels, validation labels and features, and the unlabelled test
        features.
    """
    # Plain numeric features.
    numerrical = ['loanAmnt', 'interestRate', 'installment', 'annualIncome', 'dti',
                  'delinquency_2years', 'ficoRangeHigh', 'ficoRangeLow', 'openAcc',
                  'pubRec', 'pubRecBankruptcies', 'revolBal', 'revolUtil', 'totalAcc']
    # Unordered categorical features.
    nominal = ['term', 'employmentTitle', 'homeOwnership', 'verificationStatus',
               'purpose', 'postCode', 'regionCode', 'initialListStatus', 'applicationType',
               'title', 'n0', 'n1', 'n2', 'n3', 'n4', 'n5', 'n6', 'n7', 'n8', 'n9',
               'n10', 'n11', 'n12', 'n13', 'n14', 'id']
    # Ordered categorical features.
    ordinal = ['grade', 'subGrade', 'employmentLength', 'earliesCreditLine', 'issueDate']
    # Prediction target.
    y = ['isDefault']

    def grace_Processing(train):
        # Integer-encode the ordered letter grades in place
        # (A < B < ... becomes 0 < 1 < ...).
        train.loc[:, ["grade", "subGrade"]] = OrdinalEncoder().fit_transform(
            train.loc[:, ["grade", "subGrade"]])
        return train
    grace_Processing(x)

    def employmentLength_to_int(s):
        # "5 years" -> 5; NaN passes through untouched.
        if pd.isnull(s):
            return s
        else:
            return np.int8(s.split()[0])
    # Map the open-ended buckets onto plain year counts before parsing.
    x["employmentLength"].replace(to_replace="10+ years", value="11 years", inplace=True)
    x["employmentLength"].replace(to_replace="< 1 year", value="0 years", inplace=True)
    x['employmentLength'] = x.loc[:, "employmentLength"].apply(employmentLength_to_int)

    # Keep only the year component of the two date-like columns.
    x['issueDate'] = x.loc[:, "issueDate"].apply(lambda s: int(s[:4]))
    x['earliesCreditLine'] = x.loc[:, 'earliesCreditLine'].apply(lambda s: int(s[-4:]))

    print(numerrical + nominal + ordinal + y)

    x = x[numerrical + nominal + ordinal + y]
    # Labelled rows form the training pool; unlabelled rows are the test set.
    train = x[x['isDefault'].notnull()]
    train_y = train.pop('isDefault')

    test = x[x['isDefault'].isnull()]
    test_y = test.pop('isDefault')

    train_x, vel_x, train_y, vel_y = train_test_split(train, train_y, test_size=0.25, random_state=0)

    def Category_Encoders(train_x, train_y, test_x, vel_x):
        # Encode nominal columns: one-hot for low cardinality, CatBoost
        # target-encoding otherwise.  Columns with <= 2 distinct values are
        # skipped — the original code fell through and reused the previous
        # column's encoder for them (or hit an unbound ``enc`` on the first
        # column), which was a bug.
        for col in nominal:
            distinct = train_x[col].nunique()
            if 2 < distinct < 4:
                enc = OneHotEncoder(handle_missing='indicator').fit(train_x[col], train_y)
            elif distinct >= 4:
                enc = CatBoostEncoder().fit(train_x[col], train_y)
            else:
                continue  # binary/constant column: nothing to encode

            train_x[col] = enc.transform(train_x[col])
            test_x[col] = enc.transform(test_x[col])
            vel_x[col] = enc.transform(vel_x[col])

        return train_x, test_x, vel_x

    # Currently disabled; enable to target-encode the nominal columns.
    # train_x, test, vel_x = Category_Encoders(train_x, train_y, test, vel_x)

    return train_x, train_y, vel_y, vel_x, test

# XGBoost: apply after hyper-parameter tuning
def GS(x, y, adjust=False):
    """Train (or grid-search) an XGBoost classifier with pre-tuned parameters.

    Parameters
    ----------
    x, y : training features and labels.
    adjust : bool
        False -> fit the model and plot its feature importances;
        True  -> wrap the model in GridSearchCV over ``cv_params`` (empty
        by default; fill it in to actually search) and report the best
        parameters and score.

    Returns
    -------
    The fitted XGBClassifier, or the fitted GridSearchCV wrapper when
    ``adjust`` is True.
    """
    t1 = time.perf_counter()
    # Parameters found by the earlier Bayesian optimisation run (see BO_xgb).
    other_params = {'booster': 'gbtree', 'eta': 0.1, 'nthread': 4, 'eval_metric': 'auc',
                    'objective': 'binary:logistic',
                    'colsample_bytree': 0.4354, 'gamma': 9.888, 'max_delta_step': 4,
                    'n_estimators': 1000, 'learning_rate': 0.02,
                    'max_depth': 10, 'min_child_weight': 3.268, 'subsample': 0.7157}
    m = XGBClassifier(**other_params)
    # The original compared ``adjust==False`` / ``adjust==True`` in two
    # separate ifs, silently returning an UNFITTED model for any non-bool
    # truthy value; a plain if/else closes that gap.
    if not adjust:
        m.fit(x, y)
        plot_importance(m, max_num_features=25, height=0.5)
        plt.show()
    else:
        cv_params = {}  # grid to search; empty dict means a single CV fit
        m = GridSearchCV(estimator=m, param_grid=cv_params, scoring='roc_auc', cv=2)
        m.fit(x, y)
        evalute_result = m.cv_results_
        print('每轮迭代运行结果:{0}'.format(evalute_result))
        best_params = m.best_params_
        best_score = m.best_score_
        print(best_params, best_score)

    t2 = time.perf_counter()
    print('耗时：', (t2 - t1))
    return m

# XGBoost hyper-parameter tuning
def BO_xgb(x, y):
    """Bayesian-optimise XGBoost hyper-parameters, maximising 5-fold CV AUC.

    Returns the optimiser's best result (a dict with 'target' and 'params').
    """
    t1 = time.perf_counter()

    def xgb_cv(max_depth, gamma, min_child_weight, max_delta_step, subsample, colsample_bytree):
        # Cast the integer-valued parameters and clamp the sampling ratios
        # into [0, 1] before handing them to XGBoost.
        params = {
            'booster': 'gbtree',
            'max_depth': int(max_depth),
            'gamma': gamma,
            'eta': 0.1,
            'objective': 'binary:logistic',
            'nthread': 4,
            'eval_metric': 'auc',
            'subsample': max(min(subsample, 1), 0),
            'colsample_bytree': max(min(colsample_bytree, 1), 0),
            'min_child_weight': min_child_weight,
            'max_delta_step': int(max_delta_step),
            'seed': 1001,
        }
        return cross_val_score(XGBClassifier(**params), x, y, scoring='roc_auc', cv=5).mean()

    # Search space for each hyper-parameter.
    search_space = {
        'max_depth': (5, 12),
        'gamma': (0.001, 10.0),
        'min_child_weight': (0, 20),
        'max_delta_step': (0, 10),
        'subsample': (0.4, 1.0),
        'colsample_bytree': (0.4, 1.0),
    }
    optimizer = BayesianOptimization(xgb_cv, search_space)
    optimizer.maximize(n_iter=20)
    print(optimizer.max)

    t2 = time.perf_counter()
    print('耗时：', (t2 - t1))

    return optimizer.max

# LightGBM hyper-parameter tuning
def BO_lgb(x, y):
    """Bayesian-optimise LightGBM hyper-parameters, maximising 3-fold CV AUC.

    Returns the optimiser's best result (a dict with 'target' and 'params').
    """
    t1 = time.perf_counter()

    def lgb_cv(max_depth, num_leaves, min_data_in_leaf,
               feature_fraction, bagging_fraction, lambda_l2):
        # Integer-valued parameters arrive as floats from the optimiser;
        # cast them before building the model.
        params = {
            'num_leaves': int(num_leaves),
            'min_data_in_leaf': int(min_data_in_leaf),
            'objective': 'regression',
            'max_depth': int(max_depth),
            'learning_rate': 0.01,
            "boosting": "gbdt",
            "feature_fraction": feature_fraction,
            "bagging_freq": 1,
            "bagging_fraction": bagging_fraction,
            "bagging_seed": 11,
            "metric": 'auc',
            "lambda_l2": lambda_l2,
            "verbosity": -1,
        }
        return cross_val_score(LGBMRegressor(**params), x, y, scoring='roc_auc', cv=3).mean()

    # Search space for each hyper-parameter.
    bounds = {
        'max_depth': (4, 10),
        'num_leaves': (5, 130),
        'min_data_in_leaf': (10, 80),
        'feature_fraction': (0.7, 1.0),
        'bagging_fraction': (0.7, 1.0),
        'lambda_l2': (3, 11),
    }
    optimizer = BayesianOptimization(lgb_cv, bounds)   # (objective, bounds)
    optimizer.maximize(n_iter=20)                      # run 20 iterations
    print(optimizer.max)                               # best score + params

    t2 = time.perf_counter()
    print('耗时：', (t2 - t1))

    return optimizer.max

# LightGBM model
def makelgb():
    """Return an LGBMClassifier configured with the tuned hyper-parameters.

    Values in parentheses are earlier settings kept for reference.
    """
    params = dict(
        num_leaves=123,            # leaf nodes per tree (was 114)
        max_depth=10,              # cap on tree depth (was 9)
        learning_rate=0.02,        # shrinkage rate
        n_estimators=1000,         # number of boosting rounds
        subsample_for_bin=5000,    # samples used to construct feature bins
        min_child_samples=200,     # min records per leaf; curbs overfitting
        colsample_bytree=0.2,      # column subsample ratio per tree
        reg_alpha=0.1,             # L1 regularisation on weights
        reg_lambda=0.1,            # L2 regularisation on weights
        lambda_l2=9,               # native LightGBM L2 term (was 3)
        min_data_in_leaf=26,       # (was 52)
        feature_fraction=0.7,      # (was 0.75)
        bagging_fraction=1,        # (was 0.79)
    )
    return LGBMClassifier(**params)

# ROC scoring — accuracy evaluation
def roc(m, x, y, name):
    """Print the AUC and plot the ROC curve for a fitted classifier.

    Parameters
    ----------
    m : fitted model exposing ``predict_proba``.
    x, y : features and true binary labels.
    name : str
        Label prefix used in the printed AUC line and the plot legend.
    """
    # Probability of the positive class.
    y_pred = m.predict_proba(x)[:, 1]
    fpr, tpr, threshold = metrics.roc_curve(y, y_pred)
    roc_auc = metrics.auc(fpr, tpr)
    print(name+'AUC：{}'.format(roc_auc))
    # Plot the ROC curve.
    plt.figure(figsize=(8, 8))
    plt.plot(fpr, tpr, 'b', label=name + 'AUC = %0.4f' % roc_auc)
    plt.ylim(0, 1)
    plt.xlim(0, 1)
    plt.legend(loc='best')
    # The original also called plt.title(name) earlier, but that was dead
    # code: this call immediately overwrote it before the plot was shown.
    plt.title('ROC')
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    # Diagonal reference line (a random classifier).
    plt.plot([0, 1], [0, 1], 'r--')
    plt.show()

# Submission file generation
def prediction(m, x):
    """Score the test features and write the competition submission file.

    Reads the sample-submission template, replaces its 'isDefault' column
    with the model's positive-class probabilities, and saves the result
    as 'prediction.csv' in the working directory.
    """
    submission = pd.read_csv('/Users/lijiahui/Downloads/sample_submit.csv')
    probs = m.predict_proba(x)[:, 1]
    submission['isDefault'] = probs
    submission.to_csv('prediction.csv', index=False)


if __name__ == '__main__':
    # Load the labelled train set and the unlabelled test set, then stack
    # them so feature engineering sees both at once.
    train = pd.read_csv('/Users/lijiahui/Downloads/train.csv')
    test = pd.read_csv('/Users/lijiahui/Downloads/testA.csv')
    # profiling(train, test)
    all_data = pd.concat([train, test], axis=0, ignore_index=True)  # renamed from `all` (shadowed builtin)

    train_x, train_y, vel_y, vel_x, test = Feature_processing(all_data)

    # Optional pandas-profiling EDA:
    # profile_mini = ProfileReport(train, minimal=True)
    # profile_mini.to_file(".profile_mini.html")

    # XGB tuning / training (disabled):
    # paramet = BO_xgb(train_x, train_y)
    # model = GS(train_x, train_y, adjust=False)

    # LGB tuning (disabled):
    # model = BO_lgb(train_x, train_y)

    # LGB training.  Workflow: run BO_lgb once, copy the best parameters
    # into makelgb, then comment the tuning call back out and fit here.
    model = makelgb()
    model.fit(train_x, train_y)

    roc(model, train_x, train_y, name='train')
    roc(model, vel_x, vel_y, name='Validation')

    prediction(model, test)

# Recorded results of the last run.  These two lines were pasted into the
# file as bare text, which made the whole module a SyntaxError; kept here
# as comments instead:
# trainAUC：0.7822860069065342
# ValidationAUC：0.7353729670411389