from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold


from sklearn.metrics import log_loss
if __name__ == '__main__':

    # Precomputed document vectors for the whole corpus (train rows first,
    # then test rows — assumed; TODO confirm against the vector-building step)
    # and the processed training set carrying the labels.
    lsi_matrix = np.load('../model/doc_vectors_2row.npy')
    df_train = pd.read_csv('../data/train_processed.csv')

    # The first len(df_train) rows of the feature matrix belong to the
    # training set (this was hard-coded to 4774; deriving it from the
    # dataframe keeps the script correct if the training set changes size).
    n_train = len(df_train)
    X = lsi_matrix[:n_train, :]
    Y = df_train['label'].values

    seed = 4
    np.random.seed(seed)
    # Hold out 20% of the labeled data for validation.
    X_train, X_val, y_train, y_val = train_test_split(
        X, Y, test_size=0.20, random_state=seed)

    # Hyperparameters below were selected by an earlier GridSearchCV sweep
    # (StratifiedKFold CV, scoring='neg_log_loss') over n_estimators,
    # max_depth/min_child_weight, subsample/colsample_bytree and
    # reg_alpha/reg_lambda.
    best_xgb = XGBClassifier(
        learning_rate=0.1,
        n_estimators=350,
        max_depth=5,
        min_child_weight=1,
        gamma=0,
        subsample=0.5,
        colsample_bytree=0.6,
        colsample_bylevel=0.7,
        reg_alpha=1.9,
        reg_lambda=1.2,
        n_jobs=8,                    # replaces deprecated `nthread`
        objective='multi:softprob',
        random_state=3)              # replaces deprecated `seed`
    best_xgb.fit(X_train, y_train)

    train_predict = best_xgb.predict(X_train)
    train_predprob = best_xgb.predict_proba(X_train)
    predict = best_xgb.predict(X_val)
    predict_proba = best_xgb.predict_proba(X_val)

    # Report hard accuracy plus probabilistic log loss (the probability
    # matrices were previously computed but never used, and `log_loss`
    # was imported but never called — the intended metric is restored here).
    print("Accuracy of train: %f" % accuracy_score(y_train, train_predict))
    print("Log loss of train: %f" % log_loss(y_train, train_predprob))

    print("Accuracy of val: %f" % accuracy_score(y_val, predict))
    print("Log loss of val: %f" % log_loss(y_val, predict_proba))

    # Per-class precision/recall/F1 on the validation split.
    print(classification_report(y_val, predict))