# -*- coding: utf-8 -*-

# 导入必要的工具包
# 独立调用xgboost或在sklearn框架下调用均可。
# 1. 模型训练：超参数调优
#     1. 初步确定弱学习器数目： 20分
#     2. 对树的最大深度（可选）和min_children_weight进行调优（可选）：20分
#     3. 对正则参数进行调优：20分
#     4. 重新调整弱学习器数目：10分
#     5. 行列重采样参数调整：10分
# 2. 调用模型进行测试10分
# 3. 生成测试结果文件10分

import xgboost as xgb

import pandas as pd
import numpy as np

dpath = './data/'
train = pd.read_csv(dpath + 'RentListingInquries_FE_train_sample.csv')

# Quick sanity checks on the training data.
# NOTE: prints are parenthesized so the script runs unchanged on both
# Python 2 (where the parens are a no-op) and Python 3.
print(train.describe())
print(">>")
print(train.head(1))
print(">>")
print(train.info())

# Split features / target.
train_X = train.drop("interest_level", axis=1)
train_y = train["interest_level"]

# Check whether the classes are evenly distributed.
from matplotlib import pyplot
import seaborn as sns
sns.countplot(train_y)
pyplot.xlabel('target')
pyplot.ylabel('Number of interest')
pyplot.show()
# The class distribution is skewed, so cross-validation should sample
# each class proportionally.

# Because the classes are imbalanced, use StratifiedKFold so every fold
# preserves the per-class proportions of the full training set.
from sklearn.model_selection import StratifiedKFold
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=3)


# Use xgboost's built-in cross-validation (xgb.cv) to quickly tune the
# continuous n_estimators parameter; GridSearchCV can only search over a
# finite candidate grid.
def modelfit(alg, X_train, y_train, cv_folds=None, early_stopping_rounds=10):
    """Find the best number of boosting rounds for ``alg`` via xgb.cv.

    Parameters
    ----------
    alg : xgboost.XGBClassifier
        sklearn-wrapper estimator whose hyper-parameters seed the cv run;
        its ``n_estimators`` is the upper bound on boosting rounds.
    X_train, y_train : array-like
        Training features and labels.
    cv_folds : cross-validator or None
        e.g. a StratifiedKFold instance. When None, xgb.cv falls back to
        its own default folding. (Bug fix: the original dereferenced
        cv_folds unconditionally, so the documented default of None
        raised AttributeError.)
    early_stopping_rounds : int
        Stop when 'mlogloss' has not improved for this many rounds.

    Returns
    -------
    int
        The best n_estimators chosen by early stopping. Also written,
        with the full cv history, to 'nestimators.csv'.
    """
    xgb_param = alg.get_xgb_params()
    xgb_param['num_class'] = 3

    # Call xgboost directly rather than through the sklearn wrapper.
    xgtrain = xgb.DMatrix(X_train, label=y_train)

    # Guard against the default cv_folds=None (see docstring).
    folds = cv_folds.split(X_train, y_train) if cv_folds is not None else None
    cvresult = xgb.cv(xgb_param, xgtrain,
                      num_boost_round=alg.get_params()['n_estimators'],
                      folds=folds,
                      metrics='mlogloss',
                      early_stopping_rounds=early_stopping_rounds)

    # With early stopping, the number of result rows equals the best
    # round count.
    n_estimators = cvresult.shape[0]

    print("best n_estimators")
    print(n_estimators)

    cvresult.to_csv('nestimators.csv', index_label='n_estimators')
    return n_estimators

# Baseline hyper-parameters for the first tuning pass. A deliberately
# large n_estimators is fine here because xgb.cv early-stops and reports
# the best round count.
from xgboost import XGBClassifier

base_params = dict(
    learning_rate=0.1,
    n_estimators=1000,   # cv will shrink this to the right value
    max_depth=5,
    min_child_weight=1,
    gamma=0,
    subsample=0.3,
    colsample_bytree=0.8,
    colsample_bylevel=0.7,
    objective='multi:softmax',
    nthread=-1,
    seed=3,
)
xgb1 = XGBClassifier(**base_params)

# Run the built-in cv to determine the best n_estimators.
modelfit(xgb1, train_X, train_y, cv_folds=kfold)

# Smoke-test the configuration: train briefly with the low-level xgboost
# API and report accuracy on the training set itself.
from sklearn.metrics import accuracy_score

xgb_param = xgb1.get_xgb_params()
xgb_param['num_class'] = 3
# Call xgboost directly rather than through the sklearn wrapper.
dtrain = xgb.DMatrix(train_X, label=train_y)
# NOTE(review): only 5 boosting rounds here — a quick check, not the
# cv-selected n_estimators (~277 per the run recorded below).
bst = xgb.train(xgb_param, dtrain, 5)
# With objective='multi:softmax', predict() returns class labels directly,
# so no rounding/argmax step is needed before scoring.
train_predictions = bst.predict(dtrain)
y_train = dtrain.get_label()
print(y_train)
print(train_predictions)
train_accuracy = accuracy_score(y_train, train_predictions)
# Fixed the typo in the user-facing message ("Accuary" -> "Accuracy").
print("Train Accuracy: %.2f%%" % (train_accuracy * 100.0))
# best n_estimators
# 277

# 73.6%


