# Classification based on the scikit-learn interface

from sklearn.datasets import load_iris
import xgboost as xgb
from xgboost import plot_importance
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.metrics import accuracy_score

# Load the iris dataset (150 samples, 4 features, 3 classes).
iris = load_iris()
X, y = iris.data, iris.target

# 80/20 train/test split; fixed seed so the split is reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=0)

# Train the model: grid-search the number of trees with stratified 5-fold CV.

n_estimators = range(50, 400, 50)

# NOTE(review): XGBRFClassifier is the random-forest variant of XGBoost; the
# (removed) commented-out param dict mentioned 'multi:softmax', which suggests
# plain gradient boosting (xgb.XGBClassifier) may have been intended — confirm.
model = xgb.XGBRFClassifier()

params = {
    'n_estimators': n_estimators
}

print('type of params: ', type(params))

# Stratified folds preserve the class balance in every split; fixed seed for
# reproducible fold assignment.
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=7)
grid_search = GridSearchCV(model, params, scoring='neg_log_loss', n_jobs=-1, cv=kfold)
grid_result = grid_search.fit(X_train, y_train)


# Summarize results: best score/setting, then every candidate's mean ± std.
print('Best: %f using %s' % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_['mean_test_score']
stds = grid_result.cv_results_['std_test_score']
# Renamed from `params` to avoid shadowing the grid-search parameter dict above.
cv_params = grid_result.cv_results_['params']
for mean, stdev, param in zip(means, stds, cv_params):
    print("%f (%f) with: %r" % (mean, stdev, param))

# Plot mean CV score (± std) per n_estimators setting.
plt.errorbar(n_estimators, means, yerr=stds)
plt.title('XGBoost n_estimators vs Log Loss')
plt.xlabel('n_estimators')
# scoring='neg_log_loss' ⇒ the plotted values are negative log loss
# (higher is better), so label the axis accordingly.
plt.ylabel('Negative Log Loss')
plt.savefig('n_estimators.png')

# model.fit(X_train, y_train)

# Predict on the test set
# ans = model.predict(X_test)

# Compute accuracy
# test_accuracy = accuracy_score(y_test, ans)
# print('Accuracy: %.2f %%' % (100*test_accuracy))

# Show feature importances
# plot_importance(model)
# plt.savefig('./importance.png')
# plt.show()

