# encoding=utf-8

# sys.path.append("../main")
# https://blog.csdn.net/songzhilian22/article/details/49636725
# https://www.libinx.com/2018/text-classification-classic-ml-by-sklearn/

import os

import pandas as pd
from sklearn.model_selection import StratifiedKFold
from xgboost import XGBClassifier

import src.core.PandasUtils as pandasutils
import src.core.CacheUtils as cacheutils

# Feature engineering: build the keyword dataframe from the raw training CSV,
# caching the result on disk so repeated runs skip the expensive conversion.
path = '../../dist/train_data_full.pkl'
if not cacheutils.model_exist(path):
    print("start data conversion")
    raw = pd.read_csv("../../data/training.csv")
    raw.columns = ["type", "text"]
    # Labels arrive 1-based; shift to the 0-based class ids xgboost expects.
    raw["type"] = raw["type"].map(lambda s: int(s) - 1)
    df = pandasutils.convert2KeywordsDataframs(raw.values)
    cacheutils.save_model(df, path)
else:
    print("load model from ", path)
    df = cacheutils.load_model(path)

import numpy as np

y_train = df["y"]
X_train = np.array(df["X"])

import xgboost as xgb

# Call xgboost's built-in cross-validation (cv) directly: it can quickly tune
# the continuous n_estimators parameter via early stopping, whereas
# GridSearchCV can only cross-validate a finite set of candidate values.
def modelfit(alg, X_train, y_train, cv_folds=None, early_stopping_rounds=10,
             num_class=11, result_path='../../dist/1_nestimators.csv'):
    """Find the best ``n_estimators`` for *alg* with xgboost's built-in CV.

    Runs ``xgb.cv`` with early stopping so the whole range of boosting
    rounds is evaluated in a single pass (unlike GridSearchCV, which would
    refit once per candidate value). The estimator itself is NOT fitted.

    Parameters
    ----------
    alg : XGBClassifier
        sklearn-wrapper estimator whose parameters seed the CV run; its
        ``n_estimators`` is used as the upper bound on boosting rounds.
    X_train, y_train : array-like
        Training features and 0-based integer class labels.
    cv_folds : cross-validation generator, optional
        Passed through to ``xgb.cv`` as ``folds``.
    early_stopping_rounds : int
        Stop when the CV metric has not improved for this many rounds.
    num_class : int
        Number of target classes (default 11 preserves original behavior).
    result_path : str
        Where the per-round CV results are written as CSV.

    Returns
    -------
    int
        The best number of boosting rounds chosen by early stopping.
    """
    xgb_param = alg.get_xgb_params()
    xgb_param['num_class'] = num_class

    # Call xgboost directly rather than through the sklearn wrapper class.
    xgtrain = xgb.DMatrix(X_train, label=y_train)

    cvresult = xgb.cv(xgb_param, xgtrain,
                      num_boost_round=alg.get_params()['n_estimators'],
                      folds=cv_folds, metrics='mlogloss',
                      early_stopping_rounds=early_stopping_rounds)

    cvresult.to_csv(result_path, index_label='n_estimators')

    # Early stopping truncates the result frame, so its row count is the
    # best n_estimators.
    n_estimators = cvresult.shape[0]
    print("best n_estimators is  ", n_estimators)
    return n_estimators


# Cross-validation setup: stratified 5-fold with a fixed random_state so
# runs are reproducible.
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=3)

# A large n_estimators is fine here: early stopping inside modelfit's
# xgb.cv call picks the effective number of boosting rounds.
xgb1 = XGBClassifier(learning_rate=0.1,
                     n_estimators=1000,
                     max_depth=5,
                     min_child_weight=1,
                     gamma=0,
                     subsample=0.3,
                     colsample_bytree=0.8,
                     colsample_bylevel=0.7,
                     objective='multi:softmax',
                     seed=3)

modelfit(xgb1, X_train, y_train, cv_folds=kfold)
