'''
最近正在筹备一个网站，初步计划是分享一些资料，后续发展再议。 网站：http://39.107.234.237/
'''

# 导入数据包
import warnings

warnings.filterwarnings('ignore')
import lightgbm as lgb

# 采取k折模型方案
from sklearn.model_selection import StratifiedKFold
from tiancheng.base.base_helper import *

# Cross-validation / experiment configuration.
n_splits = 10  # number of stratified CV folds
seed = 42  # RNG seed so the fold split is reproducible
X_train, y_train, cols = get_X_y()  # training matrix, labels, cols (presumably feature names — confirm in base_helper)
n_subsets = 23  # NOTE(review): appears unused in this file — confirm before removing
test = get_test_data()
size = test.shape[0]
# X[np.random.choice(X.shape[0], 2, replace=False), :]
print(size)
print(test.shape)
# LightGBM settings. NOTE(review): max_depth/subsample below are never read —
# the `params` dict further down holds the values actually passed to lgb.train.
max_depth = 60
subsample = 0.77
num_round = 5000  # upper bound on boosting rounds (early stopping cuts it short)
early_stopping_rounds = 50  # stop when validation metric stalls for 50 rounds
# LightGBM training parameters (these, not the loose constants above, are
# what lgb.train actually receives).
params = {
    'boosting_type': 'gbdt',
    'objective': 'binary',
    'metric': {'binary_logloss', 'auc'},  # track binary log loss and AUC
    'num_leaves': 100,
    'max_depth': 8,
    'min_data_in_leaf': 250,
    'learning_rate': 0.03,
    'feature_fraction': 0.9,  # column subsampling per tree
    'bagging_fraction': 0.95,  # row subsampling ratio
    'bagging_freq': 5,  # re-sample rows every 5 iterations
    'lambda_l1': 1,
    'lambda_l2': 0.001,  # L2 weight — larger value means stronger regularization
    'min_gain_to_split': 0.2,
    'verbose': 5,
    'is_unbalance': True  # compensate for class imbalance
}


# params['metric'] = 'auc'
def log_loss(preds, labels):
    """Cross-entropy loss: ``-mean(labels * log(preds))``.

    Labels may be soft (not strictly 0/1), hence the single-term form.
    Predictions are clipped away from 0 so ``log`` never yields -inf/NaN
    (the original crashed to NaN whenever a prediction was exactly 0).

    :param preds: array-like of predicted probabilities in [0, 1]
    :param labels: array-like of (possibly soft) target values, same length
    :return: non-negative float loss
    """
    eps = 1e-15  # smallest probability we allow inside log()
    preds = np.clip(preds, eps, 1.0)
    log_likelihood = np.sum(labels * np.log(preds)) / len(preds)
    return -log_likelihood


# Accumulators filled by train_data(): per-fold test predictions (res),
# per-fold validation scores (weights), and the tag used for the output file.
res, weights, model_name = pd.DataFrame(), [], 'lgb04'

# Stratified splitter so each fold keeps the overall class ratio.
skf = StratifiedKFold(n_splits=n_splits, random_state=seed, shuffle=True)


# Custom evaluation function (older F1-based version kept below for reference)
# def f1_score_vali(preds, data_vali):
#     labels = data_vali.get_label()
#     preds = np.argmax(preds.reshape(1, -1), axis=0)
#     # kappa_score = cohen_kappa_score(labels, preds)
#     score_vali = f1_score(y_true=labels, y_pred=preds, average='weighted')
#     return 'f1_score', score_vali**2, True


def f1_score_vali(preds, data_vali):
    """Custom LightGBM feval: weighted TPR at three low FPR operating points.

    Ranks validation rows by predicted probability (descending) and reads
    off the true-positive rate at the ranks whose false-positive rate is
    closest to 0.001, 0.005 and 0.01, combining them as 0.4/0.3/0.3.

    :param preds: array of predicted probabilities for the validation set
    :param data_vali: lgb.Dataset-like object exposing get_label()
    :return: ('f1_score', score, True) — higher is better
    """
    frame = pd.DataFrame({'prob': preds.tolist(),
                          'y': data_vali.get_label().tolist()})
    frame = frame.sort_values('prob', ascending=False).reset_index(drop=True)
    labels = frame['y']
    pos_total = labels.value_counts()[1]
    neg_total = labels.value_counts()[0]
    tp_cum = labels.cumsum()
    fp_cum = np.arange(len(labels)) - tp_cum + 1
    tpr = tp_cum / pos_total
    fpr = fp_cum / neg_total
    score = (0.4 * tpr[(fpr - 0.001).abs().idxmin()]
             + 0.3 * tpr[(fpr - 0.005).abs().idxmin()]
             + 0.3 * tpr[(fpr - 0.01).abs().idxmin()])
    return 'f1_score', score, True

def train_data(X, y):
    """Train one LightGBM model per stratified fold, keeping strong folds.

    For each of the ``n_splits`` folds: fit with early stopping on the
    held-out part, score the fold with ``tpr_weight_funtion``, and when
    the score exceeds 0.77 append it to the module-level ``weights`` list
    and store the fold's predictions on the global ``test`` set as a new
    column of the module-level ``res`` DataFrame.

    :param X: row-indexable feature matrix (e.g. numpy array)
    :param y: binary labels aligned with X
    :return: None — results accumulate in the ``res``/``weights`` globals
    """
    for fold, (tr_idx, val_idx) in enumerate(skf.split(X, y)):
        print(fold)

        X_tr, X_val = X[tr_idx], X[val_idx]
        y_tr, y_val = y[tr_idx], y[val_idx]

        # NOTE(review): renamed from `train_data` — the original local
        # shadowed this function's own name (and X_tr/y_tr no longer
        # shadow the module-level X_train/y_train).
        dtrain = lgb.Dataset(X_tr, label=y_tr)
        dvalid = lgb.Dataset(X_val, label=y_val)
        clf = lgb.train(params=params, train_set=dtrain, num_boost_round=num_round,
                        valid_sets=[dvalid],
                        early_stopping_rounds=early_stopping_rounds,
                        feval=f1_score_vali, verbose_eval=10)
        val_pred = clf.predict(X_val, num_iteration=clf.best_iteration)
        score = tpr_weight_funtion(y_val, val_pred)
        if score > 0.77:  # keep only folds strong enough to blend
            weights.append(score)
            res[fold] = clf.predict(test, num_iteration=clf.best_iteration)

# Second-stage filter accumulators: weights and prediction columns that
# survive the 0.77 cut applied again in xx() below.
w_1 = []
res1 = pd.DataFrame()


def xx(i, x, threshold=0.77):
    """Keep fold ``i`` in the filtered blend when its weight ``x`` passes.

    Appends ``x`` to the module-level ``w_1`` list and copies column ``i``
    of the global ``res`` predictions into ``res1``.

    :param i: fold/column index into ``res``
    :param x: fold weight (validation score)
    :param threshold: minimum score to keep the fold; defaults to 0.77 to
        match the cut used during training
    """
    if x > threshold:
        w_1.append(x)
        res1[i] = res[i]


# Apply the filter to every fold weight. A plain loop — the original list
# comprehension was executed only for its side effects and built a
# throwaway list of Nones.
for i, x in enumerate(weights):
    xx(i, x)
# weights = [0.7770731707317073, 0.7602941176470589, 0.7117647058823529, 0.7612745098039215, 0.7029411764705882, 0.678921568627451, 0.6965686274509804, 0.6044117647058823, 0.6995098039215686, 0.7901960784313726, 0.671078431372549, 0.7485294117647059, 0.7083333333333333, 0.6813725490196079, 0.7313725490196079, 0.7598039215686275, 0.6941176470588235, 0.7617647058823529, 0.6642156862745099, 0.6843137254901961, 0.6867647058823529]
res.to_csv(sub_base_path + "res_baseline04.csv", index=False)
# blending_model(res, weights, model_name)
blending_model(res1, w_1, model_name)
