import xgboost as xgb
from xgboost import XGBClassifier
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV


# Base booster parameters shared by the xgb.cv / xgb.train calls below.
params = {'objective': 'multi:softprob', 'num_class': 3}
# 5-fold stratified CV used by every grid search in this file.
fold = StratifiedKFold(5, shuffle=True, random_state=3)
# Best boosting-round count found earlier by estimator_cv(); reused as a default.
n_estimator = 313

# Reference classifier carrying the tuned parameters collected so far.
xgb_c = XGBClassifier(n_estimators=n_estimator,
                      max_depth=6,
                      min_child_weight=9,
                      # BUG FIX: was 'multi/softprob' — xgboost objectives use a
                      # colon separator ('multi:softprob'); the slash form is invalid.
                      objective='multi:softprob',
                      random_state=10,
                      subsample=0.7,
                      # BUG FIX: keyword was misspelled 'colsample_bytreecolsample_bytree',
                      # so the intended column subsampling was never applied.
                      colsample_bytree=0.7,
                      reg_lambda=0.1,
                      reg_alpha=1
                      )

# Matplotlib defaults for the tuning plots produced below.
plt_params = {
              'figure.figsize': (30, 10)
              }

sn.set_style('whitegrid')
sn.set_context('talk')

plt.rcParams.update(plt_params)
pd.options.display.max_colwidth = 600

def load_file():
    """Load the feature-engineered training set from CSV and return it as a DataFrame."""
    # _data = xgb.DMatrix('RentListingInquries_FE_train.bin')
    _data = pd.read_csv('RentListingInquries_FE_train.csv')
    # BUG FIX: removed a stray `_data.to_csv()` call — with no path argument it
    # only built a CSV string in memory and discarded the return value.
    print(_data.info())
    return _data


def split_x_y(_data):
    """Split the DataFrame into a feature matrix and the 'interest_level' target."""
    target = _data['interest_level']
    features = _data.drop(columns=['interest_level'])
    return features, target


def default_xgb(_x_data, _y_data):
    """Train a booster with the base `params` and report training log-loss/accuracy.

    Returns the DMatrix built from the inputs so callers can reuse it for xgb.cv.
    """
    # params = {'max_depth': 2, 'eta': 1, 'silent': 0, 'objective': 'multi:softmax', 'num_class': 3}
    dtrain = xgb.DMatrix(_x_data, label=_y_data)
    d_xgb = xgb.train(params=params, dtrain=dtrain)
    # 'multi:softprob' yields an (n_samples, num_class) probability matrix.
    d_xgb_pred = d_xgb.predict(dtrain)

    # 0.6106726640439415
    print('Default xgboost accuracy log loss: ', log_loss(_y_data, d_xgb_pred))
    # BUG FIX: accuracy_score needs hard class labels, not the softprob
    # probability matrix — take the argmax over the class axis first.
    # 0.7386529421300049
    print('Default xgboost accuracy: ', accuracy_score(_y_data, np.argmax(d_xgb_pred, axis=1)))

    return dtrain


def estimator_cv(_dtrain):
    """Find the best number of boosting rounds via early-stopped xgb.cv.

    Returns the selected round count.
    """
    n_estimators = 1000
    early_stop = 50

    cv_results = xgb.cv(params=params,
                        dtrain=_dtrain,
                        early_stopping_rounds=early_stop,
                        folds=fold, num_boost_round=n_estimators,
                        metrics='mlogloss')

    # xgb.cv truncates its result frame at the early-stopped round, so the
    # row count is the chosen number of boosting rounds.
    _n_estimator = cv_results.shape[0]

    # 93
    # BUG FIX: was printing the stale module-level `n_estimator` instead of
    # the freshly computed `_n_estimator`.
    print('Best n_estimator: ', _n_estimator)

    # BUG FIX: train with the CV-selected round count, not the global default.
    bst = xgb.train(params, _dtrain, num_boost_round=_n_estimator)

    d_xgb_pred = bst.predict(_dtrain)

    # BUG FIX: read the labels from the DMatrix instead of the global `y_data`,
    # and convert the softprob matrix to class labels before scoring.
    # 0.8164005511428108
    y_true = _dtrain.get_label()
    print('Best n_estimator xgboost accuracy: ', accuracy_score(y_true, np.argmax(d_xgb_pred, axis=1)))

    return _n_estimator


def tree_depth_and_min_child_weight_cv(x, y):
    """Coarse grid search over max_depth and min_child_weight (both 1..9, step 2)."""
    max_depths = range(1, 10, 2)
    min_weight = range(1, 10, 2)
    grid_params = dict(max_depth=max_depths, min_child_weight=min_weight)
    # BUG FIX: objective was 'multi/softprob' (invalid); xgboost uses 'multi:softprob'.
    xgb_c = XGBClassifier(n_estimators=n_estimator, objective='multi:softprob', random_state=10)

    grid = GridSearchCV(xgb_c, param_grid=grid_params, scoring='neg_log_loss', n_jobs=-1, cv=fold, verbose=2)
    grid.fit(x, y)

    # 0.5855763110117655
    print('MaxDepth and minChildrenWeight cv best score: ', -grid.best_score_)
    # {'max_depth': 9, 'min_child_weight': 9}
    print('MaxDepth and minChildrenWeight cv best params: ', grid.best_params_)

    yp = grid.predict(x)

    accuracy = accuracy_score(y, yp)

    # 0.8223982817312369
    print('MaxDepth and minChildrenWeight cv accuracy score: ', accuracy)


def tree_depth_and_min_child_weight_cv_farther(x, y):
    """Finer grid search around the best coarse max_depth/min_child_weight region,
    then plot test log-loss against min_child_weight for each max_depth.
    """
    max_depths = (8, 9, 10)
    min_weight = (11, 12, 13)
    grid_params = dict(max_depth=max_depths, min_child_weight=min_weight)
    # BUG FIX: objective was 'multi/softprob' (invalid); xgboost uses 'multi:softprob'.
    xgb_c = XGBClassifier(n_estimators=n_estimator, objective='multi:softprob', random_state=10)

    # BUG FIX: return_train_score=True is required — mean/std_train_score are
    # read from cv_results_ below, and modern sklearn omits them by default.
    grid = GridSearchCV(xgb_c, param_grid=grid_params, scoring='neg_log_loss', n_jobs=-1, cv=fold, verbose=1,
                        return_train_score=True)
    grid.fit(x, y)

    # 0.5855763110117655
    # 0.5851897265196894
    # 0.5845820323325979
    print('MaxDepth and minChildrenWeight cv best score: ', -grid.best_score_)
    # {'max_depth': 9, 'min_child_weight': 9}
    # {'max_depth': 9, 'min_child_weight': 10}
    # {'min_child_weight': 12}
    print('MaxDepth and minChildrenWeight cv best params: ', grid.best_params_)

    yp = grid.predict(x)

    accuracy = accuracy_score(y, yp)

    # 0.8223982817312369
    # 0.8217093532177014
    # 0.8160966120927217
    print('MaxDepth and minChildrenWeight cv accuracy score: ', accuracy)

    # Relationship plot of log-loss vs the grid parameters
    test_means = grid.cv_results_['mean_test_score']
    test_stds = grid.cv_results_['std_test_score']
    train_means = grid.cv_results_['mean_train_score']
    train_stds = grid.cv_results_['std_train_score']

    pd.DataFrame(grid.cv_results_).to_csv('my_preds_maxdepth_min_child_weights_1.csv')

    # plot results: one curve per max_depth, x-axis is min_child_weight
    test_scores = np.array(test_means).reshape(len(max_depths), len(min_weight))
    train_scores = np.array(train_means).reshape(len(max_depths), len(min_weight))

    for i, value in enumerate(max_depths):
        plt.plot(min_weight, -test_scores[i], label='test_max_depth:' + str(value))
    # for i, value in enumerate(min_child_weight):
    #    pyplot.plot(max_depth, train_scores[i], label= 'train_min_child_weight:'   + str(value))

    plt.legend()
    # BUG FIX: the x data is min_weight, so label the axis accordingly
    # (it previously said 'max_depth').
    plt.xlabel('min_child_weight')
    plt.ylabel('Log Loss')
    plt.savefig('max_depth_vs_min_child_weght_1.png')


def reg_param_cv(x, y):
    """Grid search over L1 (reg_alpha) and L2 (reg_lambda) regularisation strengths.

    The commented ranges record the earlier, coarser search iterations.
    """
    # reg_alpha = [0.1, 1, 1.5, 2]
    # reg_lambda = [0.1, 1, 1.5, 2]

    # reg_alpha = [0.01, 0.05, 0.1, 0.2, 0.3]
    # reg_lambda = [0.1, 0.5, 1, 1.5]

    reg_alpha = [0.001, 0.005, 0.01, 0.02]
    reg_lambda = [0.4, 0.5, 0.6]

    cv_params = dict(reg_alpha=reg_alpha, reg_lambda=reg_lambda)

    # BUG FIX: objective was 'multi/softprob' (invalid); xgboost uses 'multi:softprob'.
    xgb_c = XGBClassifier(n_estimators=n_estimator, objective='multi:softprob', random_state=10, max_depth=9, min_child_weight=12)
    grid = GridSearchCV(xgb_c, param_grid=cv_params, scoring='neg_log_loss', n_jobs=-1, cv=fold, verbose=2)
    grid.fit(x, y)

    # 0.5844959571105534
    # 0.5842941620477203
    # 0.5842941620477203
    print('Reg cv best score: ', -grid.best_score_)
    # 0.1 1
    # 0.01 0.5
    print('Reg cv best params:', grid.best_params_)

    yp = grid.predict(x)

    accuracy = accuracy_score(y, yp)

    # 0.8156305722159183
    # 0.8162384503160967
    # 0.8162384503160967
    print('Reg cv accuracy score: ', accuracy)


def subsample_cv(x, y):
    """Grid search over row (subsample) and column (colsample_bytree) sampling ratios."""
    # subsamples = [0.2, 0.4, 0.6, 0.8]
    # colsample_bytrees = [0.2, 0.4, 0.6, 0.8]

    subsamples = [0.7, 0.75, 0.8, 0.85, 0.9]
    colsample_bytrees = [0.7, 0.75, 0.8, 0.85, 0.9]

    # BUG FIX: objective was 'multi/softprob' (invalid); xgboost uses 'multi:softprob'.
    # BUG FIX: dropped `n_classes=3` — not an XGBClassifier parameter; the sklearn
    # wrapper derives num_class from the labels during fit.
    xgb_c = XGBClassifier(n_estimators=n_estimator,
                          max_depth=9,
                          min_child_weight=12,
                          reg_alpha=0.01,
                          reg_lambda=0.5,
                          objective='multi:softprob',
                          random_state=10
                          )

    cv_params = dict(subsample=subsamples, colsample_bytree=colsample_bytrees)
    grid = GridSearchCV(xgb_c, cv_params, cv=fold, scoring='neg_log_loss', n_jobs=-1, verbose=2)
    grid.fit(x, y)

    # 0.5838856273310297
    # 0.583227155207184
    print('SubSample cv best score: ', -grid.best_score_)
    #  {'colsample_bytree': 0.8, 'subsample': 0.8}
    # {'colsample_bytree': 0.75, 'subsample': 0.85}
    print('SubSample cv best params: ', grid.best_params_)

    yp = grid.predict(x)

    accuracy = accuracy_score(y, yp)

    # 0.8195007294537202
    # 0.8202504457772735
    print('SubSample cv accuracy: ', accuracy)


def estimator_cv_re(x, y):
    """Re-run the boosting-round search with all previously tuned parameters applied.

    Mutates the module-level `params` dict with the tuned values (used for the
    final xgb.train call) and returns the CV-selected round count.
    """
    n_estimators = 1000
    early_stop = 50

    # BUG FIX: objective was 'multi/softprob' (invalid); xgboost uses 'multi:softprob'.
    xgb_c = XGBClassifier(n_estimators=1000,
                          max_depth=9,
                          min_child_weight=12,
                          reg_alpha=0.01,
                          reg_lambda=0.5,
                          subsample=0.75,
                          colsample_bytree=0.85,
                          objective='multi:softprob',
                          num_class=3,
                          random_state=10
                          )

    _dtrain = xgb.DMatrix(x, label=y)

    # Fold the tuned values into the shared `params` so xgb.train below sees them.
    params['max_depth'] = 9
    params['min_child_weight'] = 12
    params['reg_alpha'] = 0.01
    params['reg_lambda'] = 0.5
    params['subsample'] = 0.75
    params['colsample_bytree'] = 0.85

    cv_results = xgb.cv(params=xgb_c.get_xgb_params(),
                        dtrain=_dtrain,
                        early_stopping_rounds=early_stop,
                        folds=fold,
                        num_boost_round=n_estimators,
                        metrics='mlogloss',
                        seed=10)

    # xgb.cv truncates at the early-stopped round; row count = chosen rounds.
    _n_estimator = cv_results.shape[0]

    # 93
    # BUG FIX: was printing/training with the stale module-level `n_estimator`
    # instead of the freshly computed `_n_estimator`.
    print('Best n_estimator re: ', _n_estimator)

    bst = xgb.train(params, _dtrain, num_boost_round=_n_estimator)

    d_xgb_pred = bst.predict(_dtrain)

    # BUG FIX: score against the labels passed to this function (not the global
    # `y_data`), and convert the softprob matrix to class labels first.
    # 0.8840573836926569
    print('Best n_estimator xgboost accuracy re: ', accuracy_score(y, np.argmax(d_xgb_pred, axis=1)))

    return _n_estimator


# Guard the driver code so importing this module does not trigger file I/O
# and a full grid search; the commented calls record earlier tuning stages.
if __name__ == '__main__':
    data = load_file()
    x_data, y_data = split_x_y(data)
    # dtrain = default_xgb(x_data, y_data)
    # n_estimator = estimator_cv(dtrain)
    # tree_depth_and_min_child_weight_cv(x_data, y_data)
    tree_depth_and_min_child_weight_cv_farther(x_data, y_data)
    # reg_param_cv(x_data, y_data)
    # subsample_cv(x_data, y_data)
    # estimator_cv_re(x_data, y_data)
