import lightgbm as lgb
from tiancheng.base.base_helper import *
from sklearn.model_selection import StratifiedKFold


# The inline feature-engineering pipeline that used to live here (building
# train/test from the raw operation/transaction frames) was dead commented-out
# code; it has been removed — recover it from version control if ever needed.
# The pre-built matrices are now loaded below via get_X_y()/get_test_data().

# Submission template frame; 'Tag' is filled in at the bottom of the script
# (presumably it also carries the test UIDs — TODO confirm against get_sub()).
sub = get_sub()
# Pre-built feature matrix, labels, and feature-column names from the helper
# module, plus the matching test-set feature matrix.
train, label, cols = get_X_y()
test = get_test_data()

# 10-fold stratified CV; fixed seed + shuffle for reproducible fold splits.
skf = StratifiedKFold(n_splits=10, random_state=2018, shuffle=True)
best_score = []  # per-fold best validation binary_logloss

# Out-of-fold predictions over the training set, filled fold by fold below.
# NOTE: the unused `sub_preds` accumulator was removed — its only consumer
# was already commented out.
oof_preds = np.zeros(train.shape[0])
# `res` collects one column of test predictions per kept fold; `weights`
# holds the matching per-fold TPR-weight scores (same order as the columns).
res, weights = pd.DataFrame(), []
# Train one LightGBM classifier per CV fold; keep only folds whose
# out-of-fold TPR-weight score clears 0.79 for the later blend.
for fold, (trn_idx, val_idx) in enumerate(skf.split(train, label)):
    # Depth and seed are deliberately varied per fold for model diversity.
    clf = lgb.LGBMClassifier(
        boosting_type='gbdt',
        num_leaves=5000,
        reg_alpha=3,
        reg_lambda=5,
        max_depth=train.shape[1] - fold * 2,
        n_estimators=5000,
        objective='binary',
        subsample=0.95,
        colsample_bytree=0.95,
        subsample_freq=1,
        learning_rate=0.03,
        class_weight='balanced',
        random_state=1000 + fold,
        n_jobs=8,
        min_child_weight=8,
        min_child_samples=8,
        min_split_gain=0.2,
    )
    # Hoist the fold slices once instead of re-indexing per call.
    X_trn, y_trn = train[trn_idx], label[trn_idx]
    X_val, y_val = train[val_idx], label[val_idx]
    clf.fit(X_trn, y_trn, verbose=10,
            eval_metric="binary_logloss",
            eval_set=[(X_trn, y_trn), (X_val, y_val)],
            early_stopping_rounds=50)
    best_score.append(clf.best_score_['valid_1']['binary_logloss'])
    print(best_score)
    # Out-of-fold probabilities for this fold's validation slice.
    oof_preds[val_idx] = clf.predict_proba(X_val, num_iteration=clf.best_iteration_)[:, 1]
    m = tpr_weight_funtion(y_predict=oof_preds[val_idx], y_true=y_val)
    print(m)
    if m < 0.79:
        # Weak fold: contribute nothing to the blend.
        continue
    # Record this fold's test predictions and its score as the blend weight.
    weights.append(m)
    res[fold] = clf.predict_proba(test, num_iteration=clf.best_iteration_)[:, 1]

print(weights)

# Guard: if every fold scored below the 0.79 threshold there is nothing to
# blend or submit — fail loudly instead of an opaque ValueError from max([]).
if not weights:
    raise RuntimeError("no fold reached the 0.79 TPR-weight threshold; nothing to submit")

# Renumber the kept-fold columns to 0..k-1 so they line up positionally with
# `weights` (folds were appended to both in the same order).
res.columns = range(res.shape[1])
blending_model(res, weights, "baseline05")
print(123)  # debug marker kept from the original run log

# Single-best-fold submission: the column whose fold scored highest.
best = weights.index(max(weights))
sub['Tag'] = res[best]
print(max(weights))
sub.to_csv(sub_base_path + 'baseline05_%s.csv' % str(weights[best]), index=False)

