"""
author：石沙
date：2020-09-28
content：本模块用执行训练过程
"""

# The path setup below must run before the project imports so that the
# training task flow can resolve its modules correctly.
import sys
from settings import MAIN_PATH, SRC_PATH
sys.path.extend([MAIN_PATH, SRC_PATH])


from datasets import load_book_clean, load_book_undersample_1k, FeatureContainer, load_book_augmented
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import classification_report
import lightgbm as lgb
from site_packages.utils.models import ModelOp


def train_gridsearch_cv(X_train, y_train, param_grid=None, cv=3, scoring='f1_macro'):
    """Grid-search LightGBM hyperparameters and return the refitted best model.

    Parameters
    ----------
    X_train : array-like of shape (n_samples, n_features)
        Training feature matrix.
    y_train : array-like of shape (n_samples,)
        Training labels (scikit-learn expects a 1-D array).
    param_grid : dict, optional
        Hyperparameter grid for the search. Defaults to the bagging /
        min_child grid originally hard-coded here, so existing callers
        are unaffected.
    cv : int, default 3
        Number of cross-validation folds.
    scoring : str, default 'f1_macro'
        Scoring metric passed to GridSearchCV.

    Returns
    -------
    lightgbm.LGBMClassifier
        ``gsearch.best_estimator_``, already refit on the full training set.
    """
    if param_grid is None:
        param_grid = {
            'bagging_fraction': [0.8, 0.9],
            'bagging_freq': [2, 3],
            'min_child_samples': [18, 20, 22],
            'min_child_weight': [0.001, 0.002],
        }

    # Base estimator; keys present in param_grid override the values below.
    # NOTE(review): is_unbalance is a binary-objective option in LightGBM —
    # with objective='multiclass' it has no effect; confirm intent.
    # NOTE(review): bagging_fraction=1 disables bagging at the baseline;
    # it only kicks in via the grid values above.
    gbm = lgb.LGBMClassifier(objective='multiclass',
                             is_unbalance=True,
                             metric='multi_logloss',
                             max_depth=6,
                             num_leaves=40,
                             learning_rate=0.1,
                             feature_fraction=0.7,
                             min_child_samples=21,
                             min_child_weight=0.001,
                             bagging_fraction=1,
                             bagging_freq=2,
                             reg_alpha=0.001,
                             reg_lambda=8,
                             cat_smooth=0,
                             num_iterations=50,
                             num_threads=-1
                             )
    gsearch = GridSearchCV(gbm, param_grid=param_grid, scoring=scoring, cv=cv)
    gsearch.fit(X_train, y_train)
    print('参数的最佳取值:{0}'.format(gsearch.best_params_))
    print('最佳模型得分:{0}'.format(gsearch.best_score_))
    print(gsearch.cv_results_['mean_test_score'])
    print(gsearch.cv_results_['params'])
    return gsearch.best_estimator_


def train(data_type='normal'):
    """Run the end-to-end training flow for the given dataset variant.

    Loads the labelled book data, attaches the embedding features,
    splits train/test, grid-searches a LightGBM classifier, prints a
    classification report on the held-out set and persists the model.

    Parameters
    ----------
    data_type : {'normal', '1k', 'augment'}, default 'normal'
        Which dataset loader to use. An unknown value raises ``KeyError``
        (same behaviour callers already rely on).
    """
    # 数据加载 (load the labelled data, keep only ids + labels)
    load_dict = {
        'normal': load_book_clean,
        '1k': load_book_undersample_1k,
        'augment': load_book_augmented
    }
    data = load_dict[data_type]()
    labels = data[['book_id', 'label']]
    # Release the full dataframe early; only the label frame is needed below.
    del data

    # 加载特征 (feature groups to pull from the FeatureContainer)
    features = [
        'embedding_doc_mean',
        'embedding_doc_max',
        'ngram_embedding_2_mean',
        'ngram_embedding_2_max',
        'ngram_embedding_3_mean',
        'ngram_embedding_3_max',
        'ngram_embedding_4_mean',
        'ngram_embedding_4_max',
        'attention_embedding_mean',
        'attention_embedding_max'
    ]
    df_features = FeatureContainer.load(features, labels)
    feature_cols = [col for col in df_features.columns if col not in ['book_id', 'label']]

    # 切分: use a 1-D label array — the previous df[['label']].values was a
    # 2-D column vector, which triggers sklearn's DataConversionWarning.
    # random_state makes the split (and the reported metrics) reproducible.
    y = df_features['label'].values
    X_train, X_test, y_train, y_test = train_test_split(
        df_features[feature_cols].values,
        y,
        test_size=0.2,
        stratify=y,
        random_state=42
    )

    # 训练 + 评估 + 保存
    model = train_gridsearch_cv(X_train, y_train)
    y_pred = model.predict(X_test)
    print('测试报告如下：')
    print(classification_report(y_test, y_pred))
    ModelOp.save(model, 'lgbm')


if __name__ == '__main__':
    # Script entry point: train on the 1k undersampled dataset variant.
    train('1k')