# -*- coding: utf-8 -*-

# 导入必要的工具包
# 独立调用xgboost或在sklearn框架下调用均可。
# 1. 模型训练：超参数调优
#     1. 初步确定弱学习器数目： 20分
#     2. 对树的最大深度（可选）和min_children_weight进行调优（可选）：20分
#     3. 对正则参数进行调优：20分
#     4. 重新调整弱学习器数目：10分
#     5. 行列重采样参数调整：10分
# 2. 调用模型进行测试10分
# 3. 生成测试结果文件10分

import xgboost as xgb

import pandas as pd
import numpy as np

# Location of the pre-built binary DMatrix files (feature-engineered
# Rent Listing Inquiries train/test sets).
dpath = './data/'
dtrain = xgb.DMatrix(dpath + 'RentListingInquries_FE_train.bin')
dtest = xgb.DMatrix(dpath + 'RentListingInquries_FE_test.bin')

# Sanity-check what was loaded: feature names and matrix dimensions.
# NOTE: print() calls (not Python 2 print statements) so the script runs
# under Python 3; the parenthesized form is also valid on Python 2.
print(dtrain.feature_names)
print(dtrain.num_row())
print(dtrain.num_col())
print(dtest.num_row())

# Training labels (interest-level classes) stored inside the DMatrix.
y_train = dtrain.get_label()
print(y_train)

## Visualize whether the target classes are evenly distributed.
from matplotlib import pyplot
import seaborn as sns

sns.countplot(y_train)
pyplot.xlabel('target')
pyplot.ylabel('Number of interest')
pyplot.show()
## The classes are not well balanced, so cross-validation should draw
## samples from each class proportionally.

# The classes are imbalanced, so use StratifiedKFold: every fold keeps the
# per-class proportions of the full training set.
# prepare cross validation
from sklearn.model_selection import StratifiedKFold

kfold = StratifiedKFold(shuffle=True, n_splits=5, random_state=3)




# from xgboost import XGBClassifier
#
# from sklearn.model_selection import GridSearchCV
#

#
# # 计算分类正确率
# from sklearn.metrics import accuracy_score
#
#
#
#
#
# # 1. 确定学习器数据，默认取100 ： 解释也就是弱学习器的最大迭代次数，或者说最大的弱学习器的个数
# n_estimators = 100



# # specify parameters via map
# param = {'max_depth':2, 'eta':1, 'silent':0, 'objective':'binary:logistic' }
#

# early_stopping_rounds = 10
# cv_result = xgb.cv(param, dtrain, num_boost_round=n_estimators, folds=kfold,
#                    metrics='logloss', early_stopping_rounds=early_stopping_rounds)
# # NOTE: xgb.cv's `folds` parameter takes a splitter object (e.g. the
# # StratifiedKFold built above); an integer fold count must be passed as
# # `nfold=5` instead.
#
# # 最佳参数n_estimators
# n_estimators = cv_result.shape[0]
# print("Best n_estimators:", n_estimators)
#
# # 采用交叉验证得到的最佳参数n_estimators，训练模型
# bst = xgb.train(param, dtrain, n_estimators)
#
# # plot
# test_means = cv_result['test-logloss-mean']
# test_stds = cv_result['test-logloss-std']
#
# x_axis = range(0, cv_result.shape[0])
#
#
# from matplotlib import pyplot
# pyplot.errorbar(x_axis, test_means, yerr=test_stds, label='Test')
#
# pyplot.title("XGBoost n_estimators vs Log Loss")
# pyplot.xlabel('n_estimators')
# pyplot.ylabel('Log Loss')
#
# pyplot.show()


# # make prediction
# preds = bst.predict(dtest)
# y_pred = [round(value) for value in preds]
# y_test = dtest.get_label()
# test_accuracy = accuracy_score(y_test, y_pred)
# print("Test Accuracy: %.2f%%" % (test_accuracy * 100.0))


