#!/usr/bin/env python3
# encoding: utf-8

import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel

from lightgbm import LGBMClassifier
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier,plot_importance
from sklearn.metrics import precision_score,recall_score,f1_score,brier_score_loss,log_loss,roc_auc_score
from category_encoders import WOEEncoder ,OneHotEncoder,CatBoostEncoder,TargetEncoder
from  lightgbm import LGBMRegressor
from pandas_profiling import ProfileReport
import pandas_profiling

import time
import warnings
import toad
warnings.filterwarnings('ignore')



def profiling(train, test):
    """Build pandas-profiling reports for the train set (overall and per
    `isDefault` class) and the test set.

    BUG FIX: the original built the four reports and silently discarded
    them; they are now returned so callers can inspect or persist them,
    e.g. ``reports['train'].to_file('./train.html')``.

    Returns a dict with keys 'train_y1', 'train_y0', 'train', 'test'.
    """
    train_y0 = train[train['isDefault'] == 0]   # non-default rows
    train_y1 = train[train['isDefault'] == 1]   # default rows
    reports = {
        'train_y1': pandas_profiling.ProfileReport(train_y1),
        'train_y0': pandas_profiling.ProfileReport(train_y0),
        'train': pandas_profiling.ProfileReport(train),
        'test': pandas_profiling.ProfileReport(test),
    }
    return reports
def process(x):
    """Split the combined frame into train/validation features+labels and the
    unlabelled test features.

    Rows with a non-null 'isDefault' are labelled training data; rows where
    'isDefault' is null form the test set.  Returns
    ``(train_x, train_y, vel_y, vel_x, test)`` -- note the y/x order of the
    validation pair, kept as-is for caller compatibility.
    """
    # .copy() so the pop() below mutates an independent frame instead of a
    # slice of x (avoids pandas SettingWithCopyWarning, previously hidden by
    # the global warnings filter).
    train = x[x['isDefault'].notnull()].copy()
    train_y = train.pop('isDefault')

    test = x[x['isDefault'].isnull()].copy()
    test.pop('isDefault')  # drop the all-NaN label column; its values are unused

    train_x, vel_x, train_y, vel_y = train_test_split(train, train_y, test_size=0.25, random_state=0)

    return train_x, train_y, vel_y, vel_x, test

def GS(x, y, adjust=False):       # non-Bayesian model tuning
    """Fit an XGBoost classifier, optionally grid-searching hyper-parameters.

    With ``adjust=False`` the model is fit once on (x, y) and its feature
    importances are plotted.  With ``adjust=True`` a 2-fold ROC-AUC
    GridSearchCV is run over ``cv_params`` and the best params/score printed.

    Returns the fitted model (or fitted GridSearchCV wrapper).
    """
    t1 = time.perf_counter()
    other_params = {'booster': 'gbtree', 'eta': 0.1, 'nthread': 4, 'eval_metric': 'auc',
                    'objective': 'binary:logistic',
                    'colsample_bytree': 0.4354, 'gamma': 9.888, 'max_delta_step': 4,
                    'n_estimators': 1000, 'learning_rate': 0.02,
                    'max_depth': 10, 'min_child_weight': 3.268, 'subsample': 0.7157}
    # BUG FIX: the dict must be unpacked with **; passing it positionally
    # silently ignored every hyper-parameter above.
    m = XGBClassifier(**other_params)
    if not adjust:
        m.fit(x, y)
        plot_importance(m, max_num_features=25, height=0.5)
        plt.show()
    else:
        cv_params = {}  # empty grid: CV-fits the base configuration only
        m = GridSearchCV(estimator=m, param_grid=cv_params, scoring='roc_auc', cv=2)
        m.fit(x, y)
        evalute_result = m.cv_results_
        print('每轮迭代运行结果:{0}'.format(evalute_result))
        print(m.best_params_, m.best_score_)

    # BUG FIX: timing and return previously happened only in the adjust=True
    # branch; now both paths report elapsed time and return the model.
    t2 = time.perf_counter()
    print('耗时：', (t2 - t1))
    return m
def BO_xgb(x, y):
    """Bayesian hyper-parameter search for an XGBoost binary classifier.

    Maximizes mean 5-fold ROC-AUC over the search space and returns the
    optimizer's best result (its ``.max`` dict).
    """
    start = time.perf_counter()

    def xgb_cv(max_depth, gamma, min_child_weight, max_delta_step,
               subsample, colsample_bytree):
        # Assemble the full parameter dict: integer-valued knobs are cast,
        # sampling fractions clamped into [0, 1].
        params = {
            'booster': 'gbtree',
            'max_depth': int(max_depth),
            'gamma': gamma,
            'eta': 0.1,
            'objective': 'binary:logistic',
            'nthread': 4,
            'eval_metric': 'auc',
            'subsample': min(max(subsample, 0), 1),
            'colsample_bytree': min(max(colsample_bytree, 0), 1),
            'min_child_weight': min_child_weight,
            'max_delta_step': int(max_delta_step),
            'seed': 1001,
        }
        clf = XGBClassifier(**params)
        return cross_val_score(clf, x, y, scoring='roc_auc', cv=5).mean()

    search_space = {
        'max_depth': (5, 12),
        'gamma': (0.001, 10.0),
        'min_child_weight': (0, 20),
        'max_delta_step': (0, 10),
        'subsample': (0.4, 1.0),
        'colsample_bytree': (0.4, 1.0),
    }
    optimizer = BayesianOptimization(xgb_cv, search_space)
    optimizer.maximize(n_iter=20)
    print(optimizer.max)

    finish = time.perf_counter()
    print('耗时：', (finish - start))

    return optimizer.max
from sklearn.model_selection import GridSearchCV
import lightgbm as lgb
from sklearn.model_selection import StratifiedKFold
def BO_lgb(x, y):         # hyper-parameter tuning
    """Bayesian hyper-parameter search for a LightGBM regressor scored by AUC.

    Maximizes mean 3-fold ROC-AUC over the search space and returns the
    optimizer's best result (its ``.max`` dict).
    """
    start = time.perf_counter()

    def lgb_cv(max_depth, num_leaves, min_data_in_leaf,
               feature_fraction, bagging_fraction, lambda_l2):
        # Integer-valued knobs are cast; the rest pass through unchanged.
        params = {
            'num_leaves': int(num_leaves),
            'min_data_in_leaf': int(min_data_in_leaf),
            'objective': 'regression',
            'max_depth': int(max_depth),
            'learning_rate': 0.01,
            'boosting': 'gbdt',
            'feature_fraction': feature_fraction,
            'bagging_freq': 1,
            'bagging_fraction': bagging_fraction,
            'bagging_seed': 11,
            'metric': 'auc',
            'lambda_l2': lambda_l2,
            'verbosity': -1,
        }
        reg = LGBMRegressor(**params)
        return cross_val_score(reg, x, y, scoring='roc_auc', cv=3).mean()

    # Parameter ranges to explore.
    search_space = {
        'max_depth': (4, 10),
        'num_leaves': (5, 130),
        'min_data_in_leaf': (10, 80),
        'feature_fraction': (0.7, 1.0),
        'bagging_fraction': (0.7, 1.0),
        'lambda_l2': (3, 11),
    }
    optimizer = BayesianOptimization(lgb_cv, search_space)  # (objective, bounds)
    optimizer.maximize(n_iter=20)                           # run 20 iterations
    print(optimizer.max)                                    # best point found

    finish = time.perf_counter()
    print('耗时：', (finish - start))

    return optimizer.max


def BO_lgb1(x, y):         # stepwise grid-search tuning
    """Coarse-then-fine grid search of LightGBM ``num_leaves``/``max_depth``.

    Runs two GridSearchCV passes (5-fold stratified, ROC-AUC) on (x, y):
    a coarse sweep, then a fine sweep around the coarse optimum.  Results
    are printed; nothing is returned.
    """
    t1 = time.perf_counter()

    def get_best_cv_params(learning_rate=0.1, n_estimators=581, num_leaves=31, max_depth=-1, bagging_fraction=1.0,
                           feature_fraction=1.0, bagging_freq=0, min_data_in_leaf=20, min_child_weight=0.001,
                           min_split_gain=0, reg_lambda=0, reg_alpha=0, param_grid=None):
        # 5-fold stratified cross-validation
        cv_fold = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)

        model_lgb = lgb.LGBMClassifier(learning_rate=learning_rate,
                                       n_estimators=n_estimators,
                                       num_leaves=num_leaves,
                                       max_depth=max_depth,
                                       bagging_fraction=bagging_fraction,
                                       feature_fraction=feature_fraction,
                                       bagging_freq=bagging_freq,
                                       min_data_in_leaf=min_data_in_leaf,
                                       min_child_weight=min_child_weight,
                                       min_split_gain=min_split_gain,
                                       reg_lambda=reg_lambda,
                                       reg_alpha=reg_alpha,
                                       n_jobs=8
                                       )
        grid_search = GridSearchCV(estimator=model_lgb,
                                   cv=cv_fold,
                                   param_grid=param_grid,
                                   scoring='roc_auc'
                                   )
        # BUG FIX: fit on the function arguments, not the module-level
        # globals train_x/train_y the original reached for.
        grid_search.fit(x, y)

        print('模型当前最优参数为:{}'.format(grid_search.best_params_))
        print('模型当前最优得分为:{}'.format(grid_search.best_score_))

    """n_estimators fixed at 581; coarse sweep of num_leaves and max_depth"""
    lgb_params = {'num_leaves': range(10, 80, 5), 'max_depth': range(3, 10, 2)}
    get_best_cv_params(learning_rate=0.1, n_estimators=581, num_leaves=None, max_depth=None, min_data_in_leaf=20,
                       min_child_weight=0.001, bagging_fraction=1.0, feature_fraction=1.0, bagging_freq=0,
                       min_split_gain=0, reg_lambda=0, reg_alpha=0, param_grid=lgb_params)
    """fine sweep around num_leaves~30, max_depth~7"""
    lgb_params = {'num_leaves': range(25, 35, 1), 'max_depth': range(5, 9, 1)}
    get_best_cv_params(learning_rate=0.1, n_estimators=85, num_leaves=None, max_depth=None, min_data_in_leaf=20,
                       min_child_weight=0.001, bagging_fraction=1.0, feature_fraction=1.0, bagging_freq=0,
                       min_split_gain=0, reg_lambda=0, reg_alpha=0, param_grid=lgb_params)

    # BUG FIX: timing moved after the searches; the original measured (and
    # printed) elapsed time before any grid search had run.
    t2 = time.perf_counter()
    print('耗时：', (t2 - t1))


def makelgb(): # gradient-boosted decision tree model (LightGBM)
    """Build and return the tuned LGBMClassifier used as the final model.

    NOTE(review): three LightGBM alias pairs are both supplied below --
    reg_lambda vs lambda_l2, colsample_bytree vs feature_fraction, and
    min_child_samples vs min_data_in_leaf.  LightGBM resolves each pair to
    a single effective value (with a warning); presumably the native names
    (lambda_l2, feature_fraction, min_data_in_leaf) win -- confirm which
    value actually takes effect.
    NOTE(review): bagging_fraction normally has no effect unless
    bagging_freq > 0, which is not set here -- verify it is applied.
    """
    lgbr = LGBMClassifier(num_leaves=129        # max leaves per tree (earlier try: 114)
                         ,max_depth=9 # max tree depth (earlier try: 9)
                         ,learning_rate=.02     # shrinkage / learning rate
                         ,n_estimators=1000     # number of boosting rounds
                         ,subsample_for_bin=5000    # samples used to construct feature bins
                         ,min_child_samples=400     # min data per leaf (earlier: 200); guards against overfitting
                         ,colsample_bytree=0.4      # column subsample ratio per tree (earlier: .2)
                         ,reg_alpha=.1      # L1 regularization on weights; guards against overfitting
                         ,reg_lambda=.4     # L2 regularization on weights (earlier: 0.1)
                         ,lambda_l2=10.946371648408919  # native-name L2 alias (earlier: 3) -- see NOTE above
                         ,min_data_in_leaf=45   # native-name alias of min_child_samples (earlier: 52)
                         ,feature_fraction =0.7122431634305788 # native-name alias of colsample_bytree (earlier: 0.75)
                         ,bagging_fraction=0.76136302565464 # row subsample fraction (earlier: 0.79)
                         )
    return lgbr

def roc(m, x, y, name):
    """Plot the ROC curve of classifier ``m`` on (x, y) and return the AUC.

    ``name`` labels the printed AUC, the legend and the plot title.
    """
    y_pred = m.predict_proba(x)[:, 1]
    # compute ROC points and the area under the curve
    fpr, tpr, threshold = metrics.roc_curve(y, y_pred)
    roc_auc = metrics.auc(fpr, tpr)
    print(name + 'AUC：{}'.format(roc_auc))
    # draw the ROC curve
    plt.figure(figsize=(8, 8))
    plt.plot(fpr, tpr, 'b', label=name + 'AUC = %0.4f' % roc_auc)
    plt.ylim(0, 1)
    plt.xlim(0, 1)
    plt.legend(loc='best')
    # BUG FIX: the original called plt.title twice (name, then 'ROC'),
    # so the first title was silently overwritten; set it once.
    plt.title(name + ' ROC')
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    # chance diagonal
    plt.plot([0, 1], [0, 1], 'r--')
    plt.show()
    # BUG FIX: return the AUC instead of discarding it (callers may ignore it).
    return roc_auc
def prediction(m, x):
    """Score ``x`` with model ``m`` and write a submission file.

    Loads the sample submission template, fills its 'isDefault' column with
    the positive-class probabilities, and saves it as prediction.csv.
    """
    submission = pd.read_csv('sample_submit.csv')
    submission['isDefault'] = m.predict_proba(x)[:, 1]
    submission.to_csv('prediction.csv', index=False)


if __name__ == '__main__':
    # The raw competition files were only needed by the (disabled) profiling
    # step; reading them unconditionally wasted I/O, so both loads moved into
    # the commented block below.
    # train = pd.read_csv('train.csv')
    # test = pd.read_csv('testA.csv')
    # profiling(train, test)

    # Stack the pre-computed feature-engineered train and test parts into one
    # frame; process() separates them again via the null/non-null 'isDefault'
    # label.  ('all_data' instead of 'all' to avoid shadowing the builtin.)
    all_data = pd.concat([pd.read_csv('new_train_data_FE8.csv'),
                          pd.read_csv('new_test_data_FE8.csv')],
                         axis=0, ignore_index=True)

    train_x, train_y, vel_y, vel_x, test = process(all_data)

    # Alternative tuning entry points, kept for reference:
    # model = BO_xgb(train_x, train_y)
    # model = GS(train_x, train_y, adjust=False)
    # model = BO_lgb(train_x, train_y)
    model = makelgb()
    model.fit(train_x, train_y)

    roc(model, train_x, train_y, name='train')
    roc(model, vel_x, vel_y, name='Validation')

    # prediction(model, test)