import pandas as pd
import os
import gc
import lightgbm as lgb
import xgboost as xgb
from catboost import CatBoostRegressor
import numpy as np
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, log_loss
import time
import warnings

# Pandas display settings: show full column content and every row/column when
# printing DataFrames (handy when inspecting wide feature tables).
# Use the canonical 'display.max_colwidth' option name; the bare 'max_colwidth'
# shorthand relies on pandas' regex option matching and is less robust.
pd.set_option('display.max_colwidth', 200)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# Silence library warnings (e.g. sklearn convergence / lightgbm chatter).
warnings.filterwarnings('ignore')


# 读取训练集、测试集
def get_feature(num, data_label=None):
    """Load and merge the per-window feature files for window ``num``.

    Reads the register/create/launch/activity feature CSVs produced by the
    feature-engineering step and left-joins them on ``user_id``, with the
    register table as the base (so every registered user keeps a row).

    Parameters
    ----------
    num : str or int
        Window identifier embedded in the feature file names
        (e.g. '0'..'5' for training windows, '12' for the test window).
    data_label : pandas.DataFrame, optional
        Label table with a 'user_id' column and one 'label_<num>' column per
        window.  When provided, the matching labels are merged in as a
        'label' column (training data); when omitted, raw features are
        returned (test data).

    Returns
    -------
    pandas.DataFrame
        One row per user with all merged features, plus 'label' when
        ``data_label`` is given (NaN for users missing from the label table).
    """
    path = '../data/feature/'

    register = pd.read_csv(path + 'register_feature_' + str(num) + '.csv')
    create = pd.read_csv(path + 'create_feature_' + str(num) + '.csv')
    launch = pd.read_csv(path + 'launch_feature_' + str(num) + '.csv')
    activity = pd.read_csv(path + 'activity_feature_' + str(num) + '.csv')

    feature = pd.merge(register, launch, on='user_id', how='left')
    feature = pd.merge(feature, activity, on='user_id', how='left')
    feature = pd.merge(feature, create, on='user_id', how='left')
    # Free the per-table frames early to keep peak memory down.
    del register
    del create
    del launch
    del activity

    if data_label is not None:
        label_name = 'label_' + str(num)
        # Keep only users present in this window's features, then select this
        # window's label column.  (The original filtered into a temp frame but
        # then accidentally re-selected from the unfiltered table, discarding
        # the filter; the left-merge masked the mistake.)
        data_label_tmp = data_label[data_label['user_id'].isin(feature['user_id'])]
        data_label_tmp = data_label_tmp.loc[:, ['user_id', label_name]]
        data_label_tmp.columns = ['user_id', 'label']
        feature = pd.merge(feature, data_label_tmp, on='user_id', how='left')

    return feature


# 读标签数据
# Load the label table (user_id plus one 'label_<num>' column per window).
data_label = pd.read_csv('../data/feature/data_label.csv')

# Test features come from window 12 (no labels); training features are the
# concatenation of windows 0-5 with their labels attached.
# pd.concat replaces the chained DataFrame.append, which was deprecated and
# removed in pandas 2.0; ignore_index=True reproduces reset_index(drop=True).
test_x = get_feature('12')
train_x = pd.concat(
    [get_feature(str(num), data_label) for num in range(6)],
    ignore_index=True,
)

train_y = train_x['label']
test_user = test_x['user_id']

# print(train_x.head())


import matplotlib.pyplot as plt
import seaborn as sns

# plt.figure(figsize=(14, 14))
# plt.title('features correlation plot (Pearson)')
# corr = train_x.corr()
# sns.heatmap(corr, xticklabels=corr.columns, yticklabels=corr.columns, linewidths=.1, cmap="Blues")
# plt.show()

# print(train_x.corr().sort_values('label', ascending=False))

from sklearn import feature_selection

# --- Exploratory feature selection ---------------------------------------
# NOTE(review): the three selection results below (features_var,
# features_lr_embed, features_gbdt_embed) are computed but never used by the
# modeling code — this is expensive dead computation; consider removing it or
# actually sub-setting the feature columns with the selectors' support masks.

# Feature matrix (all columns except id/label) and target vector.
feature_df = train_x[[f for f in train_x.columns if f not in ['user_id', 'label']]]
label = train_x['label']

# Filter
# Variance threshold: keep only features whose variance exceeds the threshold.
features_var = feature_selection.VarianceThreshold(threshold=0.7).fit_transform(feature_df)
# print(features_var)

# Wrapper
# Recursive feature elimination with logistic regression as the base model;
# n_features_to_select is the number of features to keep.
from sklearn.linear_model import LogisticRegression

# features_rfe = feature_selection.RFE(estimator=LogisticRegression(), n_features_to_select=30).fit_transform(
# feature_df, label) print(features_rfe)

# Embedded
# Penalty-based selection with logistic regression as the base model.
# NOTE(review): the original comment described an L1 penalty, but the code
# uses penalty="l2" — confirm which was intended (L1 induces sparsity, L2
# does not, so SelectFromModel behaves quite differently).
features_lr_embed = feature_selection.SelectFromModel(LogisticRegression(penalty="l2", C=0.1)).fit_transform(feature_df,
                                                                                                             label)
# print(features_lr_embed)


# Tree-based selection using GBDT feature importances as the base model.
from sklearn.ensemble import GradientBoostingClassifier

features_gbdt_embed = feature_selection.SelectFromModel(GradientBoostingClassifier()).fit_transform(feature_df, label)
# print(features_gbdt_embed)

# Drop identifier and target columns so train_x/test_x hold features only.
# (train_y was captured above, so deleting 'label' here is safe.)
del train_x['user_id']
del test_x['user_id']
del train_x['label']

# Feature names, used later for LightGBM importance reporting.
features = train_x.columns.tolist()


def cv_model(clf, train_x, train_y, test_x, clf_name):
    """Run 5-fold stratified CV for one GBDT library; return OOF/test preds.

    Parameters
    ----------
    clf : module or class
        The ``lgb`` or ``xgb`` module, or the CatBoost estimator class,
        matching ``clf_name``.
    train_x : pandas.DataFrame
        Training features (id/label columns already removed).
    train_y : pandas.Series
        Binary labels positionally aligned with ``train_x``
        (0..n-1 RangeIndex expected, since folds index it by position).
    test_x : pandas.DataFrame
        Test features with the same columns as ``train_x``.
    clf_name : str
        One of "lgb", "xgb", "cat" — selects the training branch.

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Out-of-fold predictions for the training rows, and the
        fold-averaged predictions for the test rows.
    """
    folds = 5
    seed = 2022
    kf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)

    # OOF predictions (each row filled exactly once, by its validation fold)
    # and the test predictions averaged over all folds.
    train = np.zeros(train_x.shape[0])
    test = np.zeros(test_x.shape[0])

    cv_scores = []

    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):
        print('************************************ {} ************************************'.format(str(i + 1)))
        trn_x, trn_y, val_x, val_y = train_x.iloc[train_index], train_y[train_index], train_x.iloc[valid_index], \
                                     train_y[valid_index]

        if clf_name == "lgb":
            train_matrix = clf.Dataset(trn_x, label=trn_y)
            valid_matrix = clf.Dataset(val_x, label=val_y)

            params = {
                'boosting_type': 'gbdt',
                'objective': 'binary',
                'metric': 'auc',
                'min_child_weight': 5,
                'num_leaves': 2 ** 5,
                'lambda_l2': 10,
                'feature_fraction': 0.8,
                'bagging_fraction': 0.8,
                'bagging_freq': 4,
                # NOTE(review): 0.5 is a very aggressive learning rate;
                # early stopping limits the damage, but worth revisiting.
                'learning_rate': 0.5,
                'seed': seed,
                'n_jobs': -1,
                'verbose': -1,
            }

            model = clf.train(params, train_matrix, 50000, valid_sets=[train_matrix, valid_matrix],
                              categorical_feature=[], verbose_eval=2000, early_stopping_rounds=500)
            val_pred = model.predict(val_x, num_iteration=model.best_iteration)
            test_pred = model.predict(test_x, num_iteration=model.best_iteration)

            # Top-20 features by total gain, for quick sanity-checking.
            print(list(sorted(zip(features, model.feature_importance("gain")), key=lambda x: x[1], reverse=True))[:20])

        if clf_name == "xgb":
            train_matrix = clf.DMatrix(trn_x, label=trn_y)
            valid_matrix = clf.DMatrix(val_x, label=val_y)
            test_matrix = clf.DMatrix(test_x)

            params = {'booster': 'gbtree',
                      'objective': 'binary:logistic',
                      'eval_metric': 'auc',
                      'gamma': 1,
                      'min_child_weight': 1.5,
                      'max_depth': 5,
                      'lambda': 10,
                      'subsample': 0.7,
                      'colsample_bytree': 0.7,
                      'colsample_bylevel': 0.7,
                      'eta': 0.5,
                      'tree_method': 'exact',
                      'seed': seed,
                      'nthread': 36
                      }

            watchlist = [(train_matrix, 'train'), (valid_matrix, 'eval')]

            model = clf.train(params, train_matrix, num_boost_round=50000, evals=watchlist, verbose_eval=2000,
                              early_stopping_rounds=200)
            val_pred = model.predict(valid_matrix, ntree_limit=model.best_ntree_limit)
            test_pred = model.predict(test_matrix, ntree_limit=model.best_ntree_limit)

        if clf_name == "cat":
            params = {'learning_rate': 0.5, 'depth': 5, 'l2_leaf_reg': 10, 'bootstrap_type': 'Bernoulli',
                      'od_type': 'Iter', 'od_wait': 50, 'random_seed': 11, 'allow_writing_files': False}

            # NOTE(review): clf is CatBoostRegressor here, so .predict yields
            # continuous scores (fine for AUC / stacking, not class labels).
            model = clf(iterations=20000, **params)
            model.fit(trn_x, trn_y, eval_set=(val_x, val_y),
                      cat_features=[], use_best_model=True, verbose=2000)

            val_pred = model.predict(val_x)
            test_pred = model.predict(test_x)

        train[valid_index] = val_pred
        test += test_pred / kf.n_splits
        cv_scores.append(roc_auc_score(val_y, val_pred))

        print(cv_scores)

    # Fixed the garbled "scotrainre" label in the summary print.
    print("%s_score_list:" % clf_name, cv_scores)
    print("%s_score_mean:" % clf_name, np.mean(cv_scores))
    print("%s_score_std:" % clf_name, np.std(cv_scores))
    return train, test


def lgb_model(x_train, y_train, x_test):
    """Run the LightGBM CV pipeline; return (oof_train_preds, test_preds)."""
    return cv_model(lgb, x_train, y_train, x_test, "lgb")


def xgb_model(x_train, y_train, x_test):
    """Run the XGBoost CV pipeline; return (oof_train_preds, test_preds)."""
    return cv_model(xgb, x_train, y_train, x_test, "xgb")


def cat_model(x_train, y_train, x_test):
    """Run the CatBoost CV pipeline; return (oof_train_preds, test_preds)."""
    return cv_model(CatBoostRegressor, x_train, y_train, x_test, "cat")


# Train the three base models; each returns out-of-fold train predictions and
# fold-averaged test predictions, which feed the level-2 stacker below.
lgb_train, lgb_test = lgb_model(train_x, train_y, test_x)
xgb_train, xgb_test = xgb_model(train_x, train_y, test_x)
cat_train, cat_test = cat_model(train_x, train_y, test_x)


def stack_model(oof_1, oof_2, oof_3, predictions_1, predictions_2, predictions_3, y):
    """Blend three base models with a BayesianRidge level-2 stacker.

    Parameters
    ----------
    oof_1, oof_2, oof_3 : array-like, shape (n_train,)
        Out-of-fold training predictions from the three base models.
    predictions_1, predictions_2, predictions_3 : array-like, shape (n_test,)
        Test-set predictions from the three base models.
    y : pandas.Series or numpy.ndarray
        Training labels (0..n-1 positional index expected, since fold
        indices are positional).

    Returns
    -------
    (numpy.ndarray, numpy.ndarray)
        Stacked OOF predictions for the training rows and averaged stacked
        predictions for the test rows.
    """
    # Level-2 design matrices: one column per base model.
    train_stack = np.vstack([oof_1, oof_2, oof_3]).transpose()
    test_stack = np.vstack([predictions_1, predictions_2, predictions_3]).transpose()

    from sklearn.linear_model import BayesianRidge
    from sklearn.model_selection import RepeatedKFold
    folds = RepeatedKFold(n_splits=5, n_repeats=2, random_state=2022)
    # Total number of fits = n_splits * n_repeats; derive it from the splitter
    # instead of hard-coding 5 * 2, so the fold setup can change in one place.
    n_fits = folds.get_n_splits()

    oof = np.zeros(train_stack.shape[0])
    predictions = np.zeros(test_stack.shape[0])

    for fold_, (trn_idx, val_idx) in enumerate(folds.split(train_stack, y)):
        print("fold n°{}".format(fold_ + 1))
        trn_data, trn_y = train_stack[trn_idx], y[trn_idx]
        val_data, val_y = train_stack[val_idx], y[val_idx]
        print("-" * 10 + "Stacking " + str(fold_) + "-" * 10)
        clf = BayesianRidge()
        clf.fit(trn_data, trn_y)

        # NOTE(review): with RepeatedKFold each index is validated n_repeats
        # times, so oof keeps only the last repeat's prediction; the reported
        # AUC therefore reflects the final repeat rather than an average.
        oof[val_idx] = clf.predict(val_data)
        predictions += clf.predict(test_stack) / n_fits
    print('mean: ', roc_auc_score(y, oof))

    return oof, predictions


# Final blend: stack_test holds the per-user scores for the test window
# (pair with test_user to build a submission).
stack_train, stack_test = stack_model(lgb_train, xgb_train, cat_train, lgb_test, xgb_test, cat_test, train_y)
