# encoding=utf-8

# sys.path.append("../main")
# https://blog.csdn.net/songzhilian22/article/details/49636725
# https://www.libinx.com/2018/text-classification-classic-ml-by-sklearn/

import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from xgboost import XGBClassifier

import src.core.CacheUtils as cacheutils
import src.core.PandasUtils as pandasutils
import src.core.train as trainutils

# Feature engineering: build the keyword dataframe from the raw CSV, or
# reuse a previously pickled copy if one exists on disk.
path = '../../dist/train_data_full.pkl'
if not cacheutils.model_exist(path):
    print("start data conversion")
    raw = pd.read_csv("../../data/training.csv")
    raw.columns = ["type", "text"]
    # Shift labels down by one (presumably 1-based in the CSV; XGBoost
    # wants 0-based class ids — TODO confirm against the data file).
    raw["type"] = raw["type"].map(lambda label: int(label) - 1)
    df = pandasutils.convert2KeywordsDataframs(raw.values)
    cacheutils.save_model(df, path)
else:
    print("load model from ", path)
    df = cacheutils.load_model(path)

# Extract the supervised-learning inputs from the cached dataframe.
# (numpy is imported at the top of the file with the other dependencies;
# the mid-script import and the commented-out debug prints were removed.)
y_train = df["y"]            # class labels, already shifted to 0-based ids
X_train = np.array(df["X"])  # feature rows materialized as a numpy array

# Search space for tuning step 2: tree depth and minimum child weight.
# (Same values as range(3, 10, 2) and range(1, 6, 2), spelled out.)
param_test2_1 = {
    "max_depth": [3, 5, 7, 9],
    "min_child_weight": [1, 3, 5],
}
# Stratified 5-fold CV preserves the class distribution in every fold.
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=3)
# Base multi-class classifier; max_depth / min_child_weight here are
# placeholders that the grid search overrides.
xgb2 = XGBClassifier(
    objective='multi:softmax',
    learning_rate=0.1,
    n_estimators=155,  # a large value is fine — CV selects the effective boosting rounds
    max_depth=5,
    min_child_weight=1,
    gamma=0,
    subsample=0.3,
    colsample_bytree=0.8,
    colsample_bylevel=0.7,
    seed=3)

# Exhaustive search over param_test2_1, scored by negative log-loss on the
# stratified folds; refits the best configuration on the full training set.
gsearch2_1 = GridSearchCV(xgb2, param_grid=param_test2_1, scoring='neg_log_loss', n_jobs=-1, cv=kfold)
gsearch2_1.fit(X_train, y_train)

print(gsearch2_1.best_params_)
print(gsearch2_1.best_score_)

print("gsearch2_1")
trainutils.printmetrics(gsearch2_1, X_train, y_train)
print("xgb2")
# BUG FIX: GridSearchCV fits *clones* of the estimator, so `xgb2` itself is
# never fitted and evaluating it raised NotFittedError. Report the refitted
# best estimator instead. NOTE(review): these metrics are computed on the
# training data and will be optimistic — consider a held-out split.
trainutils.printmetrics(gsearch2_1.best_estimator_, X_train, y_train)
