# -*- coding: UTF-8 -*-
import pandas as pd
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_val_score

from lightgbm import LGBMClassifier
from xgboost import XGBClassifier,plot_importance
from category_encoders import WOEEncoder ,OneHotEncoder,CatBoostEncoder,TargetEncoder
import pandas_profiling
import time
import warnings
warnings.filterwarnings('ignore')

def profiling(train, test):
    """Build pandas-profiling EDA reports for the train/test data.

    Generates one report per default class (isDefault == 1 / 0), one for the
    whole train set and one for the test set.  The original version computed
    the reports and silently discarded them; they are now returned so a
    caller can inspect them or persist them with ``report.to_file(path)``.

    Parameters
    ----------
    train : pd.DataFrame  -- training data, must contain an 'isDefault' column
    test : pd.DataFrame   -- test data (label not required)

    Returns
    -------
    dict mapping report name -> pandas_profiling.ProfileReport
    """
    train_y0 = train[train['isDefault'] == 0]
    train_y1 = train[train['isDefault'] == 1]

    reports = {
        'train_y1': pandas_profiling.ProfileReport(train_y1),
        'train_y0': pandas_profiling.ProfileReport(train_y0),
        'train': pandas_profiling.ProfileReport(train),
        'test': pandas_profiling.ProfileReport(test),
    }
    # To persist: for name, rep in reports.items(): rep.to_file('./%s.html' % name)
    return reports
def Feature_processing(x):
    """Feature-engineer the combined train+test frame and split it back.

    Expects `x` to be train and test concatenated, where the test rows have a
    missing 'isDefault'.  Derives ratio features, ordinal-encodes
    grade/employment/date columns, drops a few raw columns, encodes nominal
    columns with encoders fit on the training split only, and returns
    ``(train_x, train_y, vel_y, vel_x, test)``.
    """
    # Raw feature groups (column names from the competition data set).
    numerrical = ['loanAmnt','interestRate','installment','annualIncome','dti',
                  'delinquency_2years','ficoRangeHigh','ficoRangeLow','openAcc',
                  'pubRec','pubRecBankruptcies','revolBal','revolUtil','totalAcc']
    nominal = ['term','employmentTitle','homeOwnership','verificationStatus',
               'purpose','postCode','regionCode','initialListStatus','applicationType',
               'title','n0','n1','n2','n3','n4','n5','n6','n7','n8','n9','n10','n11','n12',
               'n13','n14','id']
    ordinal = ['grade','subGrade','employmentLength','earliesCreditLine','issueDate']
    y = ['isDefault']

    # Ratio features: ability to pay relative to the installment / total debt.
    x['Income_installment'] = round(x.loc[:,'annualIncome']/x.loc[:,'installment'],2)
    x['loanAmnt_installment'] = round(x.loc[:,'loanAmnt']/x.loc[:,'installment'],2)
    x['debt'] = round(x.loc[:,'annualIncome']*x.loc[:,'dti'],2)
    x['loanAmnt_debt'] = round(x.loc[:,'annualIncome']/x.loc[:,'debt'],2)

    # subGrade values like 'A1'..'G5' sort lexicographically; map to 1-based rank.
    # (key lambda renamed: the original shadowed the outer `x`.)
    subGrade_sort = sorted(x.loc[:,'subGrade'].unique(), key=lambda g: (g[0], g[1]))
    x['subGrade'] = x.loc[:,'subGrade'].map(lambda i: subGrade_sort.index(i)+1)

    def employmentLength_to_int(s):
        # NaN stays NaN; otherwise take the leading integer ('3 years' -> 3).
        if pd.isnull(s):
            return s
        return np.int8(s.split()[0])
    x["employmentLength"].replace(to_replace="10+ years", value="11 years", inplace=True)
    x["employmentLength"].replace(to_replace="< 1 year", value="0 years", inplace=True)
    x['employmentLength'] = x.loc[:,"employmentLength"].apply(employmentLength_to_int)

    # Keep only the year component of the two date columns.
    x['issueDate'] = x.loc[:,"issueDate"].apply(lambda s: int(s[:4]))
    x['earliesCreditLine'] = x.loc[:,'earliesCreditLine'].apply(lambda s: int(s[-4:]))
    # NOTE(review): earliest-credit year minus issue year is non-positive for
    # real data -- a credit-history "age" would be issueDate - earliesCreditLine.
    # Kept as-is to preserve current behavior; confirm intent.
    x['CreditLine'] = x.loc[:,'earliesCreditLine'] - x.loc[:,'issueDate']

    # Collapse the two FICO bounds into their midpoint.
    x['fico'] = (x.loc[:,'ficoRangeHigh']+x.loc[:,'ficoRangeLow'])*0.5
    print(numerrical)

    # Drop replaced/identifier columns; append the engineered features.
    numerrical = list(set(numerrical) - {'ficoRangeHigh', 'ficoRangeLow'}) + ['Income_installment','loanAmnt_installment','loanAmnt_debt','fico']
    nominal = list(set(nominal)-{'id','n10', 'n2'})
    ordinal = list(set(ordinal) - {'grade', 'earliesCreditLine', 'issueDate'}) + ['CreditLine']

    print(numerrical+nominal+ordinal+y)
    x = x[numerrical+nominal+ordinal+y]

    # Rows with a label are train; label-less rows are the test set.
    # .copy() so the pop()/encoder assignments below act on real frames,
    # not views of `x`.
    train = x[x['isDefault'].notnull()].copy()
    train_y = train.pop('isDefault')

    test = x[x['isDefault'].isnull()].copy()
    test.pop('isDefault')  # all-NaN label column; discard it

    train_x, vel_x, train_y, vel_y = train_test_split(train, train_y, test_size=0.25, random_state=0)

    def Category_Encoders(train_x, train_y, test_x, vel_x):
        """Fit one encoder per nominal column on the training split only and
        apply it to the train/test/validation splits."""
        for col in nominal:
            distinct = train_x[col].nunique()
            if 2 < distinct < 4:
                enc = OneHotEncoder(handle_missing='indicator').fit(train_x[col], train_y)
            elif distinct >= 4:
                # WOEEncoder / TargetEncoder were also tried; CatBoost retained.
                enc = CatBoostEncoder().fit(train_x[col], train_y)
            else:
                # BUG FIX: columns with <= 2 distinct values previously fell
                # through both branches and were transformed with the
                # *previous* column's encoder (or raised NameError for the
                # first such column).  Leave them untouched instead.
                continue

            train_x[col] = enc.transform(train_x[col])
            test_x[col] = enc.transform(test_x[col])
            vel_x[col] = enc.transform(vel_x[col])

        return train_x, test_x, vel_x

    train_x, test, vel_x = Category_Encoders(train_x, train_y, test, vel_x)

    return train_x, train_y, vel_y, vel_x, test
def GS(x, y, adjust=False):
    """Fit an XGBoost classifier; optionally grid-search over `cv_params`.

    Parameters
    ----------
    x, y : training features / labels.
    adjust : when False, fit with the fixed `other_params` and plot feature
        importance; when True, wrap the model in GridSearchCV (ROC-AUC, 2-fold)
        and report the best parameters/score.

    Returns
    -------
    The fitted XGBClassifier, or the fitted GridSearchCV when adjust=True.
    """
    t1 = time.perf_counter()
    # Fixed hyper-parameters (values from a prior Bayesian-optimization run).
    other_params = {'booster': 'gbtree','eta': 0.1,'nthread': 4,'eval_metric': 'auc','objective': 'binary:logistic',
                    'colsample_bytree': 0.4354, 'gamma': 9.888, 'max_delta_step': 4,'n_estimators':1000,'learning_rate':0.02,
                    'max_depth': 10, 'min_child_weight': 3.268, 'subsample': 0.7157}
    m = XGBClassifier(**other_params)
    # Idiom fix: a single if/else instead of two independent `== False` /
    # `== True` equality checks.
    if adjust:
        cv_params = {}  # fill in the grid to search; an empty grid just refits
        m = GridSearchCV(estimator=m, param_grid=cv_params, scoring='roc_auc', cv=2)
        m.fit(x, y)
        evalute_result = m.cv_results_
        print('每轮迭代运行结果:{0}'.format(evalute_result))
        best_params = m.best_params_
        best_score = m.best_score_
        print(best_params, best_score)
    else:
        m.fit(x, y)
        plot_importance(m, max_num_features=25, height=0.5)
        plt.show()

    t2 = time.perf_counter()
    print('耗时：',(t2-t1))
    return m
def BO_xgb(x, y):
    """Search XGBoost hyper-parameters with Bayesian optimization,
    maximizing mean 5-fold cross-validated ROC-AUC.  Returns the
    optimizer's best result ({'target': ..., 'params': {...}})."""
    start = time.perf_counter()

    def evaluate(max_depth, gamma, min_child_weight, max_delta_step,
                 subsample, colsample_bytree):
        # Integer-valued parameters are truncated; fractions are clamped
        # into [0, 1] because the optimizer proposes raw floats.
        candidate = {
            'booster': 'gbtree',
            'max_depth': int(max_depth),
            'gamma': gamma,
            'eta': 0.1,
            'objective': 'binary:logistic',
            'nthread': 4,
            'eval_metric': 'auc',
            'subsample': max(min(subsample, 1), 0),
            'colsample_bytree': max(min(colsample_bytree, 1), 0),
            'min_child_weight': min_child_weight,
            'max_delta_step': int(max_delta_step),
            'seed': 1001,
        }
        clf = XGBClassifier(**candidate)
        scores = cross_val_score(clf, x, y, scoring='roc_auc', cv=5)
        return scores.mean()

    search_space = {
        'max_depth': (5, 12),
        'gamma': (0.001, 10.0),
        'min_child_weight': (0, 20),
        'max_delta_step': (0, 10),
        'subsample': (0.4, 1.0),
        'colsample_bytree': (0.4, 1.0),
    }
    optimizer = BayesianOptimization(evaluate, search_space)
    optimizer.maximize(n_iter=20)
    print(optimizer.max)

    elapsed = time.perf_counter() - start
    print('耗时：', elapsed)

    return optimizer.max
def BO_lgb(x, y):
    """Search LightGBM hyper-parameters with Bayesian optimization,
    maximizing mean 3-fold cross-validated ROC-AUC.

    Returns the optimizer's best result dict: {'target': auc, 'params': {...}}.
    Note: this returns *parameters*, not a fitted model — the caller must
    fit an LGBMClassifier with them before predicting.
    """
    t1 = time.perf_counter()

    def lgb_cv(max_depth,
               num_leaves,
               min_data_in_leaf,
               feature_fraction,
               bagging_fraction,
               lambda_l2):
        # Integer-valued parameters are truncated because the optimizer
        # proposes raw floats.
        # BUG FIX: the objective was 'regression', which is invalid for
        # LGBMClassifier and breaks probability-based ROC-AUC scoring;
        # binary classification uses the 'binary' objective.
        paramt = {'num_leaves': int(num_leaves),
                  'min_data_in_leaf': int(min_data_in_leaf),
                  'objective': 'binary',
                  'max_depth': int(max_depth),
                  'learning_rate': 0.01,
                  "boosting": "gbdt",
                  "feature_fraction": feature_fraction,
                  "bagging_freq": 1,
                  "bagging_fraction": bagging_fraction,
                  "bagging_seed": 11,
                  "metric": 'auc',
                  "lambda_l2": lambda_l2,
                  "verbosity": -1}
        model = LGBMClassifier(**paramt)
        return cross_val_score(model, x, y, scoring='roc_auc', cv=3).mean()

    cv_params = {'max_depth': (4, 10),
                 'num_leaves': (5, 130),
                 'min_data_in_leaf': (10, 80),
                 'feature_fraction': (0.7, 1.0),
                 'bagging_fraction': (0.7, 1.0),
                 'lambda_l2': (3, 11)}
    lgb_op = BayesianOptimization(lgb_cv, cv_params)
    lgb_op.maximize(n_iter=20)
    print(lgb_op.max)

    t2 = time.perf_counter()
    print('耗时：',(t2-t1))

    return lgb_op.max
def roc(m, x, y, name):
    """Score `x` with model `m`, print the ROC-AUC and plot the ROC curve.

    Parameters
    ----------
    m : fitted classifier exposing predict_proba.
    x, y : features and true binary labels.
    name : label used in the printout and the plot legend.
    """
    # BUG FIX: the original assigned the bound method (`y_pred = m.predict`)
    # without calling it, so roc_curve received a function object.
    # roc_curve needs continuous scores, so use the positive-class probability
    # (as the commented-out line already intended).
    y_pred = m.predict_proba(x)[:,1]
    # Predict and compute the ROC metrics.
    fpr, tpr, threshold = metrics.roc_curve(y, y_pred)
    roc_auc = metrics.auc(fpr, tpr)
    print(name+'AUC：{}'.format(roc_auc))
    # Draw the ROC curve.
    plt.figure(figsize=(8, 8))
    plt.title(name)
    plt.plot(fpr, tpr, 'b', label = name + 'AUC = %0.4f' % roc_auc)
    plt.ylim(0,1)
    plt.xlim(0,1)
    plt.legend(loc='best')
    plt.title('ROC')
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    # Diagonal reference line (random classifier).
    plt.plot([0,1],[0,1],'r--')
    plt.show()
def prediction(m, x):
    """Score the test features with model `m` and write 'prediction.csv'
    using the sample submission file as the template."""
    scores = m.predict_proba(x)[:, 1]  # positive-class probability
    template = pd.read_csv('sample_submit.csv')
    template['isDefault'] = scores
    template.to_csv('prediction.csv', index=False)


if __name__ == '__main__':
    train = pd.read_csv('train.csv')
    test = pd.read_csv('testA.csv')
    # profiling(train,test)

    # Stack train and test so feature engineering/encoding see the same
    # columns; Feature_processing splits them back apart on the missing label.
    # (renamed from `all`, which shadowed the builtin)
    combined = pd.concat([train, test], axis=0, ignore_index=True)

    train_x, train_y, vel_y, vel_x, test = Feature_processing(combined)

    # model=GS(train_x,train_y,adjust=False)
    # Paramet = BO_xgb(train_x,train_y)

    # BUG FIX: BO_lgb returns the optimizer's best result (a dict of
    # hyper-parameters), not a fitted model; the original passed that dict
    # straight to roc()/prediction(), which call predict_proba and would fail.
    # Fit an LGBMClassifier with the found parameters first.
    best = BO_lgb(train_x, train_y)
    p = best['params']
    LGBM = LGBMClassifier(objective='binary',
                          learning_rate=0.01,
                          boosting_type='gbdt',
                          max_depth=int(p['max_depth']),
                          num_leaves=int(p['num_leaves']),
                          min_data_in_leaf=int(p['min_data_in_leaf']),
                          feature_fraction=p['feature_fraction'],
                          bagging_fraction=p['bagging_fraction'],
                          bagging_freq=1,
                          bagging_seed=11,
                          lambda_l2=p['lambda_l2'])
    LGBM.fit(train_x, train_y)

    roc(LGBM, train_x, train_y, name='train')
    roc(LGBM, vel_x, vel_y, name='Validation')

    prediction(LGBM, test)