## Adversarial validation: train a binary classifier to distinguish training
## rows (target=0) from test rows (target=1). Rows whose out-of-fold
## "looks like test" probability exceeds the threshold are flagged, so a
## test-like validation subset can be carved out of the training data.
df_train = data_train.copy()
df_test = data_test.copy()
df_train['target'] = 0
df_test['target'] = 1
train_test = pd.concat([df_train, df_test], axis=0)
target = train_test['target'].values
param = {
        'num_leaves': 50,
        'objective': 'binary',
        'max_depth': 5,
        'learning_rate': 0.001,
        "boosting": "gbdt",
        "metric": 'auc',
        }

folds = KFold(n_splits=5, shuffle=True, random_state=15)
oof = np.zeros(len(train_test))


for fold_, (trn_idx, val_idx) in enumerate(folds.split(train_test.values, target)):
    print("fold n°{}".format(fold_ + 1))
    trn_data = lgb.Dataset(train_test.iloc[trn_idx][feats], label=target[trn_idx])
    val_data = lgb.Dataset(train_test.iloc[val_idx][feats], label=target[val_idx])

    num_round = 1000
    clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data],
                    verbose_eval=100, early_stopping_rounds=100)
    # Out-of-fold probability that each row belongs to the test distribution.
    oof[val_idx] = clf.predict(train_test.iloc[val_idx][feats], num_iteration=clf.best_iteration)
print("finished!")

###
threshold = 0.374
train_test['adver'] = oof
# FIX: the original used chained indexing (train_test['target'][mask] = ...),
# which raises SettingWithCopyWarning and is not guaranteed to write through.
# One vectorized assignment covers both branches (>threshold -> 1, else 0).
train_test['target'] = (train_test['adver'] > threshold).astype(int)
# The first len(data_train) rows of the concat are the original training rows;
# positional slice + .values so the assignment does not rely on index alignment.
data_train['target'] = train_test['target'].iloc[:data_train.shape[0]].values
print(data_train['target'].value_counts())
data_train[['id', 'target']].to_csv('work/features/adversialdata.csv', index=False)

###
# Rebuild data_train without the adversarial target column, then re-attach the
# adversarial labels by joining on 'id'.
# NOTE(review): `adver` is not defined anywhere in this chunk — presumably the
# DataFrame loaded from 'work/features/adversialdata.csv' written above;
# confirm it is created upstream before this runs.
data_train.drop(['target'],inplace=True,axis=1)
data_train = data_train.merge(adver, how='inner', on='id')

# Evaluate on the "test-like" training rows selected by adversarial validation:
# fit 5-fold models on rows flagged target==0, average their predictions on
# the rows flagged target==1, and score against the true labels.
fs = feature_importance_df[feature_importance_df > 0].index
# FIX: take explicit copies — the original boolean slices were views of
# data_train, so the subsequent inplace drop() raised SettingWithCopyWarning
# and risked mutating the parent frame.
train_df = data_train[data_train['target'] == 0].copy()
test_df = data_train[data_train['target'] == 1].copy()
test_df.drop(['target'], inplace=True, axis=1)
df_train = train_df.copy()
df_test = test_df.copy()

target = df_train['label'].values

param = lgb_params

folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=2020)
oof = np.zeros(len(test_df))


for fold_, (trn_idx, val_idx) in enumerate(folds.split(df_train.values, target)):
    print("fold n°{}".format(fold_ + 1))  # 1-based, consistent with the loop above
    trn_data = lgb.Dataset(df_train.iloc[trn_idx][feats], label=target[trn_idx])
    val_data = lgb.Dataset(df_train.iloc[val_idx][feats], label=target[val_idx])

    num_round = 10000
    clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data],
                    verbose_eval=100, early_stopping_rounds=100, feval=lgb_f1_score)
    # Average the five fold models' probabilities on the held-out test-like rows.
    oof += clf.predict(test_df[feats], num_iteration=clf.best_iteration) / 5

pred_label = (oof > 0.5).astype(int)
valid_f1 = f1_score(test_df['label'], pred_label)
valid_p = precision_score(test_df['label'], pred_label)
valid_r = recall_score(test_df['label'], pred_label)
# FIX: ROC-AUC must be computed from the raw probabilities, not the
# thresholded 0/1 labels — binarizing first collapses the ranking and
# systematically understates AUC.
valid_auc = roc_auc_score(test_df['label'], oof)
# Competition-style weighted blend of precision, recall and F1.
F = valid_p * 0.7 + valid_r * 0.2 + valid_f1 * 0.1
print("对抗性验证加权分数：", F)


###
# Report CV scores alongside the adversarial-validation weighted score F,
# then print and submit the blended predictions.
# NOTE(review): printresult/submitresult, cv_score_lgb/cv_score_xgb and the
# *_test prediction arrays are defined elsewhere in this file.
print("|{}  {}|{}||".format(cv_score_lgb,cv_score_xgb, F))
printresult(lgb_test, 'lgb')
printresult(xgb_test, 'xgb')
printresult(geo_test, 'geo')
submitresult(geo_test, '83277geo_11_16')


### Bayesian hyper-parameter tuning
def search_lgb(num_leaves,
                 max_depth,
                 max_bin,
                 bagging_fraction,
                 bagging_freq,
                 feature_fraction,
                 min_split_gain,
                 min_child_samples,
                 min_child_weight,
                 lambda_l2,
                 lambda_l1,
                 learning_rate,
                 min_data_in_leaf):
    """Objective for BayesianOptimization: mean 5-fold validation F1.

    Every argument arrives as a float sampled by the optimizer; the
    integer-valued hyper-parameters are truncated to int before use.
    Reads `data_train`, `feats` and `lgb_f1_score` from module scope.
    Returns the average of the per-fold best F1 scores (higher is better).
    """
    offline_score = []
    train_x = data_train[feats]
    train_y = data_train['label']
    folds = 5
    kf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=2020)

    # The optimizer samples continuous values; cast the discrete params.
    num_leaves = int(num_leaves)
    max_depth = int(max_depth)
    max_bin = int(max_bin)
    bagging_freq = int(bagging_freq)
    min_child_samples = int(min_child_samples)
    min_data_in_leaf = int(min_data_in_leaf)

    # Hoisted out of the fold loop — the dict is identical for every fold.
    # FIX: the original also set 'colsample_bytree' (alias of
    # feature_fraction) and 'reg_lambda' (alias of lambda_l2). LightGBM uses
    # the primary parameter name when both are given, so those fixed values
    # were dead and only produced alias warnings — removed.
    params = {
        'learning_rate': learning_rate,
        'boosting_type': 'gbdt',
        'objective': 'binary',
        'num_leaves': num_leaves,
        'max_depth': max_depth,
        'bagging_fraction': bagging_fraction,
        'feature_fraction': feature_fraction,
        'bagging_seed': 2020,
        'min_data_in_leaf': min_data_in_leaf,
        'tree_learner': 'voting',
        'verbose': -1,
        'nthread': 8,
        'min_child_weight': min_child_weight,
        'min_child_samples': min_child_samples,
        'min_split_gain': min_split_gain,
        'lambda_l1': lambda_l1,
        'max_bin': max_bin,
        'bagging_freq': bagging_freq,
        'lambda_l2': lambda_l2,
    }

    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):
        # FIX: use .iloc for the label Series too — train_y[train_index] is
        # label-based indexing and breaks if the index is not 0..n-1.
        trn_x, val_x = train_x.iloc[train_index], train_x.iloc[valid_index]
        trn_y, val_y = train_y.iloc[train_index], train_y.iloc[valid_index]

        train_matrix = lgb.Dataset(trn_x, label=trn_y)
        valid_matrix = lgb.Dataset(val_x, label=val_y)

        # Removed an unused model.predict(val_x) call (dead per-fold work)
        # and the unused train/test/cv_scores/test_x locals.
        model = lgb.train(params, train_matrix, 1000, valid_sets=[valid_matrix],
                          verbose_eval=100, early_stopping_rounds=200, feval=lgb_f1_score)
        offline_score.append(model.best_score['valid_0']['f1'])
    print(offline_score)
    return sum(offline_score) / folds
from bayes_opt import BayesianOptimization
# Search bounds for each LightGBM hyper-parameter. All ranges are continuous;
# search_lgb truncates the integer-valued ones (num_leaves, max_depth, ...).
pbounds = {'num_leaves': (5,100),
    'min_data_in_leaf': (5, 150),
    'learning_rate': (0.01, 0.3),
    'lambda_l1': (0, 5), 
    'max_depth': (6, 12), 
    'max_bin': (30, 80), 
    'bagging_fraction': (0.6, 1.0), 
    'bagging_freq': (1, 50), 
    'feature_fraction': (0.5, 0.8), 
    'min_split_gain': (0.0, 1.0), 
    'min_child_samples': (25, 125), 
    'min_child_weight': (0.0, 1.0), 
    'lambda_l2': (0.0, 3.0)}

# Maximize mean CV F1 over the bounds above.
optimizer = BayesianOptimization(
    f=search_lgb,
    pbounds=pbounds,
    random_state=2020,
)
# 5 random warm-up points, then 800 Bayesian iterations — each iteration runs
# a full 5-fold LightGBM fit, so this is a long job.
optimizer.maximize(
    init_points = 5,
    n_iter = 800
)
print('-'*130)
print('Final Results')
print('Maximum  value: %f' % optimizer.max['target'])
print('Best  parameters: ', optimizer.max['params'])

# Recorded result of a previous optimizer run (commented out — the raw paste
# was a Python syntax error in this file):
# Maximum  value: 0.844252
#
# Best parameters:
# 'bagging_fraction': 0.8005358711303714, 'bagging_freq': 1.8769441721990012, 'feature_fraction': 0.5504779502843268, 'lambda_l1': 2.5475225198526372, 'lambda_l2': 1.4960494419050065, 'learning_rate': 0.23623233221859305, 'max_bin': 30.558404027838126, 'max_depth': 11.705618442039626, 'min_child_samples': 41.248916750345366, 'min_child_weight': 0.9993336991412968, 'min_data_in_leaf': 13.73766196726757, 'min_split_gain': 0.01810624502124203, 'num_leaves': 96.6614718728411