# -*- coding: utf-8 -*-

# Import the required packages
import xgboost as xgb

# accuracy_score computes classification accuracy
from sklearn.metrics import accuracy_score

# Read in the data. The files ship in the demo directory of the xgboost
# installation; they have been copied under this project's data directory.
dpath = '../course/data/'
train_path = dpath + 'agaricus.txt.train'
test_path = dpath + 'agaricus.txt.test'
dtrain = xgb.DMatrix(train_path)
dtest = xgb.DMatrix(test_path)

# Report the dataset dimensions: feature count, then row count.
print(dtrain.num_col())
print(dtrain.num_row())

# Booster parameters: depth-2 trees, learning rate 1.0, verbose output,
# binary logistic objective (predictions are probabilities).
param = {'max_depth':2, 'eta':1, 'silent':0, 'objective':'binary:logistic' }

n_estimators = 1000
early_stopping_rounds = 10
# Cross-validate to find the best number of boosting rounds.
# BUG FIX: an integer fold count must be passed as `nfold`; the `folds`
# keyword expects a scikit-learn splitter object or a list of
# (train_idx, test_idx) pairs, so `folds=5` was wrong.
cv_result = xgb.cv(param, dtrain, num_boost_round=n_estimators, nfold=5,
                   metrics='logloss', early_stopping_rounds=early_stopping_rounds)

# With early stopping, cv_result is truncated at the best iteration,
# so its row count is the optimal n_estimators.
n_estimators = cv_result.shape[0]
print("Best n_estimators:", n_estimators)

# Retrain on the full training set using the best round count found by CV.
bst = xgb.train(param, dtrain, n_estimators)

# Visualize the cross-validation results: mean test log loss per
# boosting round, with error bars showing the std across folds.
from matplotlib import pyplot

test_means = cv_result['test-logloss-mean']
test_stds = cv_result['test-logloss-std']
x_axis = range(cv_result.shape[0])

pyplot.errorbar(x_axis, test_means, yerr=test_stds, label='Test')
pyplot.title("XGBoost n_estimators vs Log Loss")
pyplot.xlabel('n_estimators')
pyplot.ylabel('Log Loss')
pyplot.show()


# Predict probabilities on the test set and convert to class labels.
# NOTE: the original used round(), which in Python 3 is round-half-to-even
# (0.5 -> 0) — confusing for a probability threshold. The explicit
# `p > 0.5` test below produces the same labels but makes the decision
# rule unambiguous.
preds = bst.predict(dtest)
y_pred = [1 if p > 0.5 else 0 for p in preds]
y_test = dtest.get_label()
test_accuracy = accuracy_score(y_test, y_pred)
print("Test Accuracy: %.2f%%" % (test_accuracy * 100.0))


