import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from bayes_opt import BayesianOptimization
from sklearn.model_selection import cross_val_score
from sklearn.feature_selection import SelectFromModel

from lightgbm import LGBMClassifier
from sklearn.svm import LinearSVC
from xgboost import XGBClassifier,plot_importance
from sklearn.metrics import precision_score,recall_score,f1_score,brier_score_loss,log_loss,roc_auc_score
from category_encoders import WOEEncoder ,OneHotEncoder,CatBoostEncoder,TargetEncoder
from  lightgbm import LGBMRegressor
from pandas_profiling import ProfileReport
import pandas_profiling
import matplotlib.pyplot as plt
import seaborn as sns

import time
import warnings

from  sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.feature_selection import VarianceThreshold
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import mutual_info_regression  as mir
from sklearn.preprocessing import StandardScaler

from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot  as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression as LR
warnings.filterwarnings('ignore')



def profiling(train, test):
    """Build pandas-profiling reports for the train set (overall and split
    by the 'isDefault' label) and for the test set.

    The reports are generated but not persisted; re-enable the
    ``to_file`` call below to write the HTML files.
    """
    subsets = {
        "train_y1": train[train['isDefault'] == 1],
        "train_y0": train[train['isDefault'] == 0],
        "train": train,
        "test": test,
    }
    reports = {label: pandas_profiling.ProfileReport(frame)
               for label, frame in subsets.items()}
    # for label, report in reports.items():
    #     report.to_file("./{}.html".format(label))

def Feature_processing(x):
    """Clean, impute, engineer and filter features on the combined
    train+test frame ``x`` (test rows carry NaN 'isDefault'), then split
    the labelled part into train/validation sets.

    Returns:
        (train_x, train_y, vel_y, vel_x, test)

    NOTE(review): the returned ``test`` frame is split off BEFORE outlier
    removal, variance filtering and scaling, so its columns will not match
    the features the model is trained on — confirm before predicting on it.
    """
    # Column groupings by statistical type (defined but never used below).
    numerrical = ['loanAmnt'  ,  'interestRate'  ,'installment'  ,'annualIncome'  ,'dti'  ,
                  'delinquency_2years','ficoRangeHigh'  ,'ficoRangeLow'  ,'openAcc',
                  'pubRec','pubRecBankruptcies','revolBal',  'revolUtil'  ,'totalAcc']
    nominal = [  'term'  ,'employmentTitle',   'homeOwnership',  'verificationStatus'  ,
               'purpose','postCode','regionCode','initialListStatus','applicationType',
               'title','n0','n1','n2','n3','n4','n5','n6','n7','n8',  'n9','n10','n11','n12',
               'n13',  'n14','id']
    ordinal = ['grade','subGrade'  ,'employmentLength'  ,'earliesCreditLine'  ,'issueDate'  ]
    #y = ['isDefault']

    # Reduce the two date-like string columns to their 4-digit year.
    x['issueDate'] = x.loc[:, "issueDate"].apply(lambda s: int(s[:4]))
    x['earliesCreditLine'] = x.loc[:, 'earliesCreditLine'].apply(lambda s: int(s[-4:]))
    # NOTE(review): earliest-credit year minus issue year is usually
    # negative (credit history predates the loan) — confirm the sign is intended.
    x['CreditLine'] = x.loc[:, 'earliesCreditLine'] - x.loc[:, 'issueDate']
    #x=x[x['isDefault'].notna()]
    print(np.isnan(x['isDefault']).sum())  # count of unlabeled (test) rows
    def tezheng1(train):
        # Hand-crafted features meant to gauge user value / earning power.
        # NOTE(review): this body uses the enclosing-scope name ``data``
        # instead of its ``train`` parameter, and L below references
        # 'issueDate_year'/'earliesCreditLine_year' columns that are never
        # created — it would fail if the call further down were re-enabled.
        data['avg_income'] = data['annualIncome'] / data['employmentLength']
        data['total_income'] = data['annualIncome'] * data['employmentLength']
        data['avg_loanAmnt'] = data['loanAmnt'] / data['term']
        data['mean_interestRate'] = data['interestRate'] / data['term']
        data['all_installment'] = data['installment'] * data['term']

        data['rest_money_rate'] = data['avg_loanAmnt'] / (data['annualIncome'] + 0.1)  # +0.1 guards the division: 287 rows have zero income
        data['rest_money'] = data['annualIncome'] - data['avg_loanAmnt']

        data['closeAcc'] = data['totalAcc'] - data['openAcc']
        #data['ficoRange_mean'] = (data['ficoRangeHigh'] + data['ficoRangeLow']) / 2
        #del data['ficoRangeHigh'], data['ficoRangeLow']

        data['rest_pubRec'] = data['pubRec'] - data['pubRecBankruptcies']
        data['rest_Revol'] = data['loanAmnt'] - data['revolBal']
        data['dis_time'] = data['issueDate_year'] - (2020 - data['earliesCreditLine_year'])

    def Missing_value_handling(train):
        """Impute NaNs with the per-column mode, then map the
        'employmentLength' strings ('10+ years', '< 1 year', ...) to int8
        year counts.  Mutates ``train`` in place and also returns it."""
        imp_most_freq = SimpleImputer(strategy="most_frequent")

        #isDefault = train.loc[:, "isDefault"].values.reshape(-1, 1)
        employmentLength = train.loc[:, "employmentLength"].values.reshape(-1, 1)
        postCode = train.loc[:, "postCode"].values.reshape(-1, 1)
        dti = train.loc[:, "dti"].values.reshape(-1, 1)
        pubRecBankruptcies = train.loc[:, "pubRecBankruptcies"].values.reshape(-1, 1)
        revolUtil = train.loc[:, "revolUtil"].values.reshape(-1, 1)
        title = train.loc[:, "title"].values.reshape(-1, 1)
        n0 = train.loc[:, "n0"].values.reshape(-1, 1)
        n1 = train.loc[:, "n1"].values.reshape(-1, 1)
        n2 = train.loc[:, "n2"].values.reshape(-1, 1)
        n3 = train.loc[:, "n3"].values.reshape(-1, 1)
        n4 = train.loc[:, "n4"].values.reshape(-1, 1)
        n5 = train.loc[:, "n5"].values.reshape(-1, 1)
        n6 = train.loc[:, "n6"].values.reshape(-1, 1)
        n7 = train.loc[:, "n7"].values.reshape(-1, 1)
        n8 = train.loc[:, "n8"].values.reshape(-1, 1)
        n9 = train.loc[:, "n9"].values.reshape(-1, 1)
        n10 = train.loc[:, "n10"].values.reshape(-1, 1)
        n11 = train.loc[:, "n11"].values.reshape(-1, 1)
        n12 = train.loc[:, "n12"].values.reshape(-1, 1)
        n13 = train.loc[:, "n13"].values.reshape(-1, 1)
        n14 = train.loc[:, "n14"].values.reshape(-1, 1)
        employmentTitle = train.loc[:, "employmentTitle"].values.reshape(-1, 1)

        # NOTE(review): 'isDefault' imputation stays disabled on purpose —
        # its NaNs mark the test rows and are used for the split below.
        #train.loc[:, "isDefault"] = imp_most_freq.fit_transform(isDefault)
        train.loc[:, "employmentLength"] = imp_most_freq.fit_transform(employmentLength)
        train.loc[:, "postCode"] = imp_most_freq.fit_transform(postCode)
        train.loc[:, "dti"] = imp_most_freq.fit_transform(dti)
        train.loc[:, "pubRecBankruptcies"] = imp_most_freq.fit_transform(pubRecBankruptcies)
        train.loc[:, "revolUtil"] = imp_most_freq.fit_transform(revolUtil)
        train.loc[:, "title"] = imp_most_freq.fit_transform(title)
        train.loc[:, "n0"] = imp_most_freq.fit_transform(n0)
        train.loc[:, "n1"] = imp_most_freq.fit_transform(n1)
        train.loc[:, "n2"] = imp_most_freq.fit_transform(n2)
        train.loc[:, "n3"] = imp_most_freq.fit_transform(n3)
        train.loc[:, "n4"] = imp_most_freq.fit_transform(n4)
        train.loc[:, "n5"] = imp_most_freq.fit_transform(n5)
        train.loc[:, "n6"] = imp_most_freq.fit_transform(n6)
        train.loc[:, "n7"] = imp_most_freq.fit_transform(n7)
        train.loc[:, "n8"] = imp_most_freq.fit_transform(n8)
        train.loc[:, "n9"] = imp_most_freq.fit_transform(n9)
        train.loc[:, "n10"] = imp_most_freq.fit_transform(n10)
        train.loc[:, "n11"] = imp_most_freq.fit_transform(n11)
        train.loc[:, "n12"] = imp_most_freq.fit_transform(n12)
        train.loc[:, "n13"] = imp_most_freq.fit_transform(n13)
        train.loc[:, "n14"] = imp_most_freq.fit_transform(n14)
        train.loc[:, "employmentTitle"] = imp_most_freq.fit_transform(employmentTitle)

        for data in [train]:
            data['employmentLength'].replace(to_replace='10+ years', value='10 years', inplace=True)
            data['employmentLength'].replace('< 1 year', '0 years', inplace=True)
            # lambda's ``x`` shadows the outer frame ``x`` (harmless here)
            data['employmentLength'] = data['employmentLength'].apply(lambda x: np.int8(x.split()[0]))
            #print(train['employmentLength'].value_counts(dropna=False).sort_index())
        return train
    def grace_Processing(train):
        """Ordinal-encode 'grade' and 'subGrade' in place; returns ``train``."""
        # NOTE(review): the fitted categories_ below are computed and
        # immediately discarded — likely leftover exploration code.
        OrdinalEncoder().fit(train.loc[:, ["grade", "subGrade"]]).categories_
        train.loc[:, ["grade", "subGrade"]] = OrdinalEncoder().fit_transform(train.loc[:, ["grade", "subGrade"]])
        return train

    # -- apply the in-place cleaning steps --
    '调用'
    Missing_value_handling(x)
    grace_Processing(x)
    #tezheng1(x)


    train = x[x['isDefault'].notnull()]     # labelled rows only
    train_y = train.pop('isDefault')        # NOTE(review): rebound by train_test_split below, so this value is discarded
    # Unlabelled rows form the submission set; test_y is all-NaN and unused.
    test = x[x['isDefault'].isnull()]
    test_y = test.pop('isDefault')

    def find_outliers_by_3segama(data, fea):
        """Tag each row of column ``fea`` as '异常值' (outlier) or '正常值'
        (normal) via the 3-sigma rule; adds '<fea>_outliers' in place.

        NOTE(review): for a column containing NaN (e.g. 'isDefault') the
        mean/std bounds may be NaN, so every row compares False and is
        tagged normal — confirm that is acceptable.
        """
        data_std = np.std(data[fea])
        data_mean = np.mean(data[fea])
        outliers_cut_off = data_std * 3
        lower_rule = data_mean - outliers_cut_off
        upper_rule = data_mean + outliers_cut_off
        data[fea + '_outliers'] = data[fea].apply(lambda x: str('异常值') if x > upper_rule or x < lower_rule else '正常值')
        return data

    # Tag outliers for every column (including nominal ones and the label).
    for fea in x:
        data = find_outliers_by_3segama(x, fea)
        #print(data[fea + '_outliers'].value_counts())
        #print(data.groupby(fea + '_outliers')['isDefault'].sum())
        #print()
        #print('*' * 10)

    # -- drop every row flagged as an outlier in any column --
    '处理'
    data1 = x.copy()
    fea_data = data1[
        ['id_outliers', 'loanAmnt_outliers', 'term_outliers', 'interestRate_outliers', 'installment_outliers','issueDate_outliers',''  # NOTE(review): the stray '' implicitly concatenates with 'grade_outliers' on the next line — harmless today but fragile
         'grade_outliers', 'subGrade_outliers', 'employmentTitle_outliers', 'employmentLength_outliers','earliesCreditLine_outliers', 'CreditLine_outliers',
         'homeOwnership_outliers', 'annualIncome_outliers', 'verificationStatus_outliers', 'purpose_outliers',
         'postCode_outliers', 'regionCode_outliers', 'dti_outliers', 'delinquency_2years_outliers',
         'ficoRangeLow_outliers', 'ficoRangeHigh_outliers', 'openAcc_outliers', 'pubRec_outliers',
         'pubRecBankruptcies_outliers', 'revolBal_outliers', 'revolUtil_outliers', 'totalAcc_outliers',
         'initialListStatus_outliers', 'applicationType_outliers', 'title_outliers', 'policyCode_outliers',
         'n0_outliers', 'n1_outliers', 'n2_outliers', 'n3_outliers', 'n4_outliers', 'n5_outliers', 'n6_outliers',
         'n7_outliers', 'n8_outliers', 'n9_outliers', 'n10_outliers', 'n11_outliers', 'n12_outliers', 'n13_outliers',
         'n14_outliers', 'isDefault_outliers']]
    for fea in fea_data:
        data1 = data1[data1[fea]=='正常值']  # keep only rows tagged normal
        data1 = data1.reset_index(drop=True)
    data1 = pd.DataFrame(data1, columns=data1.columns)  # effectively a fresh copy
    #print(data1)
    data1.head()  # NOTE(review): result unused
    data2 = data1.copy()
    # Remove the helper *_outliers flag columns again.
    for fea in fea_data:
        data2.drop(fea, axis=1, inplace=True)
    #print(data2.columns)

    # -- variance filtering: drop zero-variance columns --
    '方差过滤'
    # numeric values
    selector = VarianceThreshold()  # default threshold 0.0
    # NOTE(review): data2 still contains 'isDefault' with NaN on the test
    # rows; sklearn transformers raise on NaN input — verify this runs.
    x_var0 = selector.fit_transform(data2)
    # recover the names of the surviving columns
    fea_name = data2.columns.values.tolist()
    fea_name_select = selector.get_support(indices=True)
    select_name = []
    for i in fea_name_select:
        select_name.append(fea_name[i])

    x_var0 = pd.DataFrame(x_var0)  # rebuild a labelled frame
    x_var0.columns = select_name
    data_select = x_var0.copy()
    y = data_select['isDefault']
    data_select.drop('isDefault', axis=1, inplace=True)  # split off the target
    data_select.shape  # NOTE(review): result unused
    data4 = data_select.copy()  # features without 'isDefault'
    y4 = y.copy()  # matching target

    # ANOVA F-test filter
    F, F_pvalue = f_classif(data4, y4)
    # NOTE(review): k1 counts the features with p >= 0.05 (the
    # NON-significant ones) and keeps that many top-F features — the
    # intent was probably the opposite; verify.
    k1 = F.shape[0] - (F_pvalue < 0.05).sum()
    x_fsF = SelectKBest(f_classif, k=k1).fit_transform(data4.astype('float32'), y4)  # NOTE(review): result unused

    # Mutual-information filter: keep features with MI > 0.
    x_mir = mir(data4, y4)
    k_mir = x_mir.shape[0] - sum(x_mir <= 0)

    skb = SelectKBest(mir, k=k_mir)
    x_mir = skb.fit_transform(data4.astype('float32'), y4)
    # NOTE(review): these indices refer to data4's columns, yet they are
    # used to index fea_name (data2's full column list) — the recovered
    # names are wrong whenever VarianceThreshold dropped a column or
    # 'isDefault' is not the last column.
    x_mir_fea_name = skb.get_support(indices=True)  # surviving column indices
    select_name1 = []
    for i in x_mir_fea_name:
        select_name1.append(fea_name[i])

    x_mir = pd.DataFrame(x_mir)
    x_mir.columns = select_name1  # attach the header

    data6 = x_mir.copy()  # working copy of the selected features

    # standardise to zero mean / unit variance
    sc = StandardScaler()
    sc.fit(data6)  # NOTE(review): redundant — fit_transform below refits anyway
    data6 = sc.fit_transform(data6)

    data6 = pd.DataFrame(data6)  # rebuild frame
    data6.columns = select_name1  # restore the header
    data6.head()  # NOTE(review): result unused
    x6 = data6.copy()
    y6 = y4.copy()  # aligned target



    # NOTE(review): rebinds train_y, discarding the series popped earlier.
    train_x, vel_x, train_y, vel_y = train_test_split(x6, y6, test_size=0.25, random_state=0)

    return train_x, train_y, vel_y, vel_x, test
def GS(x, y, adjust=False):
    """Train an XGBoost classifier, optionally tuning it with GridSearchCV
    (non-Bayesian counterpart to BO_xgb).

    Args:
        x, y: training features and binary target.
        adjust: when False, just fit and plot feature importances;
                when True, run GridSearchCV (empty grid placeholder).
    Returns:
        The fitted XGBClassifier, or the fitted GridSearchCV object when
        ``adjust`` is True.  (The original returned None for adjust=False.)
    """
    t1 = time.perf_counter()
    other_params = {'booster': 'gbtree','eta': 0.1,'nthread': 4,'eval_metric': 'auc','objective': 'binary:logistic',
                    'colsample_bytree': 0.4354, 'gamma': 9.888, 'max_delta_step': 4,'n_estimators':1000,'learning_rate':0.02,
                    'max_depth': 10, 'min_child_weight': 3.268, 'subsample': 0.7157}
    # BUG FIX: the params dict must be unpacked; XGBClassifier(other_params)
    # passed the whole dict as the first positional argument, so none of
    # the hyper-parameters were applied.
    m = XGBClassifier(**other_params)
    if not adjust:
        m.fit(x, y)
        plot_importance(m, max_num_features=25, height=0.5)
        plt.show()
    else:
        cv_params = {}  # TODO: fill in the grid to search
        m = GridSearchCV(estimator=m, param_grid=cv_params, scoring='roc_auc', cv=2)
        m.fit(x, y)
        evalute_result = m.cv_results_
        print('每轮迭代运行结果:{0}'.format(evalute_result))
        best_params = m.best_params_
        best_score = m.best_score_
        print(best_params, best_score)

    t2 = time.perf_counter()
    print('耗时：', (t2 - t1))
    return m
def BO_xgb(x, y):
    """Tune XGBClassifier hyper-parameters with Bayesian optimisation,
    maximising 5-fold cross-validated ROC-AUC.

    Returns the optimiser's best result (``.max``: dict with 'target' and
    'params').
    """
    started = time.perf_counter()

    def xgb_cv(max_depth, gamma, min_child_weight, max_delta_step, subsample, colsample_bytree):
        # Integer-valued params are floored; sampling ratios clamped to [0, 1].
        config = dict(
            booster='gbtree',
            max_depth=int(max_depth),
            gamma=gamma,
            eta=0.1,
            objective='binary:logistic',
            nthread=4,
            eval_metric='auc',
            subsample=max(min(subsample, 1), 0),
            colsample_bytree=max(min(colsample_bytree, 1), 0),
            min_child_weight=min_child_weight,
            max_delta_step=int(max_delta_step),
            seed=1001,
        )
        estimator = XGBClassifier(**config)
        return cross_val_score(estimator, x, y, scoring='roc_auc', cv=5).mean()

    # Search bounds; keys must match xgb_cv's parameter names.
    search_space = {
        'max_depth': (5, 12),
        'gamma': (0.001, 10.0),
        'min_child_weight': (0, 20),
        'max_delta_step': (0, 10),
        'subsample': (0.4, 1.0),
        'colsample_bytree': (0.4, 1.0),
    }
    xgb_op = BayesianOptimization(xgb_cv, search_space)
    xgb_op.maximize(n_iter=20)
    print(xgb_op.max)

    print('耗时：', (time.perf_counter() - started))

    return xgb_op.max
def BO_lgb(x, y):
    """Tune LightGBM hyper-parameters with Bayesian optimisation,
    maximising 3-fold cross-validated ROC-AUC.

    NOTE(review): this scores an LGBMRegressor (objective 'regression')
    with 'roc_auc' — confirm the installed sklearn version accepts a
    regressor for that scorer.

    Returns the optimiser's best result (``.max``: dict with 'target' and
    'params').
    """
    started = time.perf_counter()

    def lgb_cv(max_depth, num_leaves, min_data_in_leaf,
               feature_fraction, bagging_fraction, lambda_l2):
        # Integer-valued params are floored before being handed to LightGBM.
        config = dict(
            num_leaves=int(num_leaves),
            min_data_in_leaf=int(min_data_in_leaf),
            objective='regression',
            max_depth=int(max_depth),
            learning_rate=0.01,
            boosting="gbdt",
            feature_fraction=feature_fraction,
            bagging_freq=1,
            bagging_fraction=bagging_fraction,
            bagging_seed=11,
            metric='auc',
            lambda_l2=lambda_l2,
            verbosity=-1,
        )
        estimator = LGBMRegressor(**config)
        return cross_val_score(estimator, x, y, scoring='roc_auc', cv=3).mean()

    # Search bounds; keys must match lgb_cv's parameter names.
    search_space = {
        'max_depth': (4, 10),
        'num_leaves': (5, 130),
        'min_data_in_leaf': (10, 80),
        'feature_fraction': (0.7, 1.0),
        'bagging_fraction': (0.7, 1.0),
        'lambda_l2': (3, 11),
    }
    lgb_op = BayesianOptimization(lgb_cv, search_space)   # (objective, bounds)
    lgb_op.maximize(n_iter=20)                            # run 20 optimisation steps
    print(lgb_op.max)                                     # report the best result

    print('耗时：', (time.perf_counter() - started))

    return lgb_op.max

def makelgb():
    """Build an (unfitted) LGBMClassifier with hand-tuned hyper-parameters.

    Previously tried values are noted next to each setting.
    """
    params = {
        'num_leaves': 62,            # 114; leaf-node count
        'max_depth': 9,              # 9; maximum tree depth
        'learning_rate': .02,        # shrinkage rate
        'n_estimators': 1000,        # number of boosting rounds
        'subsample_for_bin': 5000,   # samples used to build the bins
        'min_child_samples': 400,    # 200; min data per leaf, curbs overfitting
        'colsample_bytree': 0.4,     # .2; column subsample per tree
        'reg_alpha': .1,             # L1 weight penalty, curbs overfitting
        'reg_lambda': .4,            # 0.1; L2 weight penalty
        'lambda_l2': 4,              # 3
        'min_data_in_leaf': 79,      # 52
        'feature_fraction': 0.96,    # 0.75
        'bagging_fraction': 0.72,    # 0.79
    }
    return LGBMClassifier(**params)

def roc(m, x, y, name):
    """Compute the ROC-AUC of classifier ``m`` on (x, y), print it, and
    plot the ROC curve.

    Args:
        m: fitted classifier exposing ``predict_proba``.
        x, y: features and true binary labels.
        name: label used in the printed line, plot title and legend.
    """
    # Probability of the positive class.
    y_pred = m.predict_proba(x)[:, 1]
    fpr, tpr, threshold = metrics.roc_curve(y, y_pred)
    roc_auc = metrics.auc(fpr, tpr)
    print(name + 'AUC：{}'.format(roc_auc))
    # Plot the ROC curve.
    plt.figure(figsize=(8, 8))
    plt.plot(fpr, tpr, 'b', label=name + 'AUC = %0.4f' % roc_auc)
    plt.ylim(0, 1)
    plt.xlim(0, 1)
    plt.legend(loc='best')
    # BUG FIX: the original set the title twice (name, then 'ROC'), so the
    # dataset name never appeared; use one combined title instead.
    plt.title(name + ' ROC')
    plt.ylabel('True Positive Rate')
    plt.xlabel('False Positive Rate')
    plt.plot([0, 1], [0, 1], 'r--')  # chance diagonal
    plt.show()
def prediction(m, x):
    """Write the predicted default probabilities for ``x`` into the
    submission template ('sample_submit.csv') and save 'prediction.csv'."""
    template = pd.read_csv('sample_submit.csv')
    template['isDefault'] = m.predict_proba(x)[:, 1]  # P(class == 1)
    template.to_csv('prediction.csv', index=False)


if __name__ == '__main__':
    train = pd.read_csv('train.csv')
    test = pd.read_csv('testA.csv')
    # profiling(train,test)
    # BUG FIX: `pd.concat([train][test], ...)` indexed the one-element
    # list [train] with a DataFrame (a TypeError at runtime); concatenate
    # both frames as the commented-out line above it intended.
    all = pd.concat([train, test], axis=0, ignore_index=True)

    train_x, train_y, vel_y, vel_x, test = Feature_processing(all)


    #profile_mini = ProfileReport(train,minimal=True)
    #profile_mini.to_file(".处理缺失？_mini.html")

    #model = BO_xgb(train_x,train_y)
    #model=GS(train_x,train_y,adjust= False)
    #model=makelgb()
    #model.fit(train_x,train_y)
    # BUG FIX: BO_lgb returns BayesianOptimization's best-result dict, not
    # a fitted model, so roc(model, ...) crashed on predict_proba.  Build a
    # classifier from the tuned parameters and fit it instead.
    best = BO_lgb(train_x, train_y)['params']
    model = LGBMClassifier(
        max_depth=int(best['max_depth']),
        num_leaves=int(best['num_leaves']),
        min_data_in_leaf=int(best['min_data_in_leaf']),
        feature_fraction=best['feature_fraction'],
        bagging_fraction=best['bagging_fraction'],
        lambda_l2=best['lambda_l2'],
        learning_rate=0.01,   # matches the rate used inside BO_lgb
        n_estimators=1000,
    )
    model.fit(train_x, train_y)

    roc(model, train_x, train_y, name='train')
    roc(model, vel_x, vel_y, name='Validation')

    # NOTE(review): the `test` frame returned by Feature_processing is
    # split off before outlier removal, variance filtering and scaling, so
    # its columns may not match the trained model's features — verify
    # before relying on the submission file.
    prediction(model, test)
