# -*- coding: utf-8 -*-
# @file: xgboost_process.py
# @author: ZhuJiahui
# @time: 2018/12/20 22:00
# @version: v1.0


import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.datasets import make_hastie_10_2
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from xgboost import XGBClassifier
from xgboost import plot_importance

from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold


def mnist_xgboost_test():
    """Train an XGBoost classifier on the sklearn digits dataset and report accuracy.

    Loads the 8x8 handwritten-digits dataset, holds out 30% for evaluation,
    fits an XGBClassifier with early stopping on the held-out set, plots the
    per-feature importance, and prints the test-set accuracy.
    """
    # Load the digits dataset (often loosely called "MNIST" here; it is the
    # smaller sklearn 8x8 digits set, not the full 28x28 MNIST).
    digits = datasets.load_digits()
    print(digits.data.shape)
    print(digits.target.shape)

    # Hold out 30% of the samples for evaluation.
    x_train, x_test, y_train, y_test = train_test_split(digits.data,
                                                        digits.target,
                                                        test_size=0.3,
                                                        random_state=33)

    # Build the model.
    # n_estimators: number of boosting rounds (up to 1000 trees)
    # max_depth: maximum tree depth
    # min_child_weight: minimum sum of instance weight needed in a leaf
    # gamma: minimum loss reduction required to make a further partition
    # subsample: fraction of samples used per tree (80%)
    # colsample_bytree: fraction of features used per tree (80%)
    # objective: the loss function to optimize
    # scale_pos_weight: weight of positive vs. negative class, for imbalance
    # random_state: random seed
    model = XGBClassifier(learning_rate=0.1, n_estimators=1000, max_depth=6,
                          min_child_weight=1, gamma=0, subsample=0.8,
                          colsample_bytree=0.8, objective='multi:softmax',
                          scale_pos_weight=1, random_state=27)

    # Early stopping halts training once mlogloss on the eval set stops
    # improving for 10 consecutive rounds, so far fewer than 1000 trees
    # are typically built.
    model.fit(X=x_train, y=y_train, eval_set=[(x_test, y_test)],
              eval_metric="mlogloss", early_stopping_rounds=10, verbose=True)

    # Visualize which of the 64 pixel features the booster relied on most.
    fig, ax = plt.subplots(figsize=(15, 15))
    plot_importance(model,
                    height=0.5,
                    ax=ax,
                    max_num_features=64)
    plt.show()

    # Predict on the held-out set.
    y_pred = model.predict(x_test)
    # Evaluate.
    accuracy = accuracy_score(y_test, y_pred)
    # Fixed typo in the original output string ("accuarcy").
    print("accuracy: %.2f%%" % (accuracy * 100.0))


def mnist_xgboost_tune_test():
    """Grid-search the learning rate of an XGBoost classifier on the digits dataset.

    Splits the sklearn digits data 70/30, then runs a 10-fold stratified
    cross-validated grid search over six learning-rate values, scoring by
    negative log loss, and prints the best score and parameters.
    """
    # Load the 8x8 digits dataset and show its dimensions.
    digits = datasets.load_digits()
    print(digits.data.shape)
    print(digits.target.shape)

    # 70/30 train/test split (the grid search only uses the training part).
    x_train, x_test, y_train, y_test = train_test_split(
        digits.data, digits.target, test_size=0.3, random_state=33)

    # Candidate learning rates and the cross-validation strategy.
    param_grid = {"learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3]}
    cv_folds = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)

    # Base estimator; all hyperparameters except learning_rate are fixed.
    # n_estimators: number of boosting rounds
    # max_depth: maximum tree depth
    # min_child_weight: minimum sum of instance weight needed in a leaf
    # gamma: minimum loss reduction required to split further
    # subsample / colsample_bytree: 80% row/column sampling per tree
    # objective: the loss function to optimize
    # scale_pos_weight: class-imbalance weighting
    # random_state: random seed
    booster = XGBClassifier(nthread=2, n_estimators=1000, max_depth=6,
                            min_child_weight=1, gamma=0, subsample=0.8,
                            colsample_bytree=0.8, objective='multi:softmax',
                            scale_pos_weight=1, random_state=27)

    # Exhaustively evaluate every learning rate with cross-validation,
    # using all available CPU cores.
    searcher = GridSearchCV(booster, param_grid, scoring="neg_log_loss",
                            n_jobs=-1, cv=cv_folds)
    fitted = searcher.fit(X=x_train, y=y_train)
    print("Best: %f using %s" % (fitted.best_score_, fitted.best_params_))


def hastie_xgboost_test():
    """Train an XGBoost classifier on the Hastie 10.2 binary dataset and print accuracy.

    Generates the synthetic binary-classification data from Hastie et al.
    2009 (Example 10.2), splits it 50/50, fits a 100-tree XGBClassifier,
    and prints the held-out accuracy.
    """
    # Generates data for binary classification used in
    # Hastie et al. 2009, Example 10.2.
    X, y = make_hastie_10_2(random_state=0)

    # test_size: fraction of samples held out for evaluation.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.5,
                                                        random_state=0)
    # NOTE(review): the original passed silent=True and seed=1000; both
    # keyword arguments are deprecated and removed in current XGBoost
    # sklearn-API releases (use verbosity/random_state instead), so they
    # are replaced here. random_state matches the convention used by the
    # other functions in this file.
    model = XGBClassifier(
        # nthread=4,  # CPU thread count; defaults to the maximum available
        learning_rate=0.3,  # shrinkage applied to each boosting step
        min_child_weight=1,
        # Minimum sum of instance weights (hessians) required in a leaf.
        # For an imbalanced 0-1 problem where h is around 0.01, a value of 1
        # means a leaf needs roughly 100 samples. Smaller values make the
        # model more prone to overfitting.
        max_depth=6,  # tree depth; larger values overfit more easily
        gamma=0,  # minimum loss reduction to split a leaf further; larger is more conservative (typical values ~0.1-0.2)
        subsample=1,  # row-subsampling ratio per tree
        max_delta_step=0,  # maximum delta step allowed for each tree's weight estimate
        colsample_bytree=1,  # column-subsampling ratio per tree
        reg_lambda=1,  # L2 regularization on leaf weights; larger values reduce overfitting
        # reg_alpha=0,  # L1 regularization term
        # scale_pos_weight=1,  # >0 helps convergence on imbalanced classes; balances positive/negative weight
        # objective='multi:softmax',  # for multi-class problems
        # num_class=10,  # number of classes, used together with multi:softmax
        n_estimators=100,  # number of boosting rounds (trees)
        random_state=1000,  # random seed (replaces the deprecated seed= alias)
        # eval_metric='auc'
    )
    model.fit(X_train, y_train, eval_metric='auc')
    y_true, y_pred = y_test, model.predict(X_test)
    print("Accuracy : %.2f%%" % (100 * accuracy_score(y_true, y_pred)))


if __name__ == "__main__":
    # Script entry point: run exactly one of the three demos; the other
    # two are left commented out and can be enabled manually.
    # mnist_xgboost_test()
    mnist_xgboost_tune_test()
    # hastie_xgboost_test()
