# -*- coding: utf-8 -*-

# 导入必要的工具包
# 独立调用xgboost或在sklearn框架下调用均可。
# 1. 模型训练：超参数调优
#     1. 初步确定弱学习器数目： 20分
#     2. 对树的最大深度（可选）和min_children_weight进行调优（可选）：20分
#     3. 对正则参数进行调优：20分
#     4. 重新调整弱学习器数目：10分
#     5. 行列重采样参数调整：10分
# 2. 调用模型进行测试10分
# 3. 生成测试结果文件10分

import xgboost as xgb
from xgboost import XGBClassifier

from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold

from sklearn.metrics import log_loss

import pandas as pd
import numpy as np

# Load the feature-engineered train/test CSVs produced by an earlier
# feature-engineering step.
dpath = './data/'
train = pd.read_csv(dpath + 'RentListingInquries_FE_train_sample.csv')
test = pd.read_csv(dpath + 'RentListingInquries_FE_test.csv')

# Split off the target column; every remaining column is a feature.
train_X = train.drop("interest_level", axis=1)
train_y = train["interest_level"]

# The classes are imbalanced, so use StratifiedKFold: each fold keeps
# the per-class proportions of the full training set.
# (StratifiedKFold is already imported at the top of the file, so the
# duplicate import that used to be here has been dropped.)
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=3)

# The first tuning round found n_estimators = 277; a smaller value (50)
# is used here to keep this round of grid search cheap.
estimators = 50
# NOTE(review): the previous round reportedly gave
# {'max_depth': 3, 'min_child_weight': 3}, but the values set below are
# 4 and 1 — confirm which pair is actually intended.
max_depth4 = 4
min_child_weight4 = 1

# Regularization strengths from the earlier regularization-tuning round.
best_reg_alpha = 0.05    # L1 regularization (xgboost default = 0)
best_reg_lambda = 0.001   # L2 regularization (xgboost default = 1)

# Classifier configured with the parameters fixed in earlier tuning
# rounds.  The subsample / colsample values given here are only starting
# points — the grid search below overrides them.
_xgb7_params = dict(
    learning_rate=0.1,
    # A generous round count is fine here; cross-validation picks the
    # effective number of boosting rounds.
    n_estimators=estimators,
    max_depth=max_depth4,
    min_child_weight=min_child_weight4,
    reg_alpha=best_reg_alpha,
    reg_lambda=best_reg_lambda,
    gamma=0,
    subsample=0.3,
    colsample_bytree=0.8,
    colsample_bylevel=0.7,
    objective='multi:softmax',
    nthread=-1,
    seed=3,
)
xgb7 = XGBClassifier(**_xgb7_params)

# Grid over row subsampling (0.3-0.8) and per-tree column subsampling
# (0.6-0.9), both stepping by 0.1.
param_test7 = {
    'subsample': [i / 10.0 for i in range(3, 9)],
    'colsample_bytree': [i / 10.0 for i in range(6, 10)],
}

# Score each combination by multiclass log loss across the stratified
# folds (GridSearchCV maximizes, hence the negated metric).
gsearch7 = GridSearchCV(xgb7, param_grid=param_test7,
                        scoring='neg_log_loss', n_jobs=-1, cv=kfold)
gsearch7.fit(train_X, train_y)

# Report the search results.  The print() call form is valid under both
# Python 2 and 3 and matches the style already used later in this file;
# cv_results_ replaces grid_scores_, which was deprecated in
# scikit-learn 0.18 and removed in 0.20.
print(gsearch7.cv_results_)
print(gsearch7.best_params_)
print(gsearch7.best_score_)

# [mean: -0.64354, std: 0.01416, params: {'subsample': 0.3, 'colsample_bytree': 0.6}, mean: -0.64386, std: 0.01342, params: {'subsample': 0.4, 'colsample_bytree': 0.6}, mean: -0.64242, std: 0.01450, params: {'subsample': 0.5, 'colsample_bytree': 0.6}, mean: -0.64402, std: 0.01514, params: {'subsample': 0.6, 'colsample_bytree': 0.6}, mean: -0.64456, std: 0.01349, params: {'subsample': 0.7, 'colsample_bytree': 0.6}, mean: -0.64411, std: 0.01246, params: {'subsample': 0.8, 'colsample_bytree': 0.6}, mean: -0.64702, std: 0.01386, params: {'subsample': 0.3, 'colsample_bytree': 0.7}, mean: -0.64341, std: 0.01458, params: {'subsample': 0.4, 'colsample_bytree': 0.7}, mean: -0.64382, std: 0.01259, params: {'subsample': 0.5, 'colsample_bytree': 0.7}, mean: -0.64418, std: 0.01392, params: {'subsample': 0.6, 'colsample_bytree': 0.7}, mean: -0.64332, std: 0.01427, params: {'subsample': 0.7, 'colsample_bytree': 0.7}, mean: -0.64401, std: 0.01400, params: {'subsample': 0.8, 'colsample_bytree': 0.7}, mean: -0.64580, std: 0.01302, params: {'subsample': 0.3, 'colsample_bytree': 0.8}, mean: -0.64402, std: 0.01291, params: {'subsample': 0.4, 'colsample_bytree': 0.8}, mean: -0.64275, std: 0.01231, params: {'subsample': 0.5, 'colsample_bytree': 0.8}, mean: -0.64146, std: 0.01372, params: {'subsample': 0.6, 'colsample_bytree': 0.8}, mean: -0.64261, std: 0.01315, params: {'subsample': 0.7, 'colsample_bytree': 0.8}, mean: -0.64343, std: 0.01515, params: {'subsample': 0.8, 'colsample_bytree': 0.8}, mean: -0.64591, std: 0.01318, params: {'subsample': 0.3, 'colsample_bytree': 0.9}, mean: -0.64254, std: 0.01401, params: {'subsample': 0.4, 'colsample_bytree': 0.9}, mean: -0.64173, std: 0.01402, params: {'subsample': 0.5, 'colsample_bytree': 0.9}, mean: -0.64267, std: 0.01201, params: {'subsample': 0.6, 'colsample_bytree': 0.9}, mean: -0.64322, std: 0.01399, params: {'subsample': 0.7, 'colsample_bytree': 0.9}, mean: -0.64367, std: 0.01404, params: {'subsample': 0.8, 'colsample_bytree': 0.9}]
# {'subsample': 0.6, 'colsample_bytree': 0.8}
# -0.641461736183


from sklearn.metrics import accuracy_score

# Sanity-check the tuned configuration through the native xgboost API
# (xgb.train) rather than the sklearn wrapper class.
xgb_param = xgb7.get_xgb_params()
xgb_param['num_class'] = 3  # multi:softmax requires the class count
dtrain = xgb.DMatrix(train_X, label=train_y)
# NOTE(review): only 5 boosting rounds here (not the tuned
# n_estimators) — this looks like a quick smoke test, not a full
# training run; confirm the round count is intentional.
bst = xgb.train(xgb_param, dtrain, 5)
# With objective 'multi:softmax', predict() returns class labels
# directly, so accuracy_score can compare them to the true labels.
train_predictions = bst.predict(dtrain)
y_train = dtrain.get_label()
train_accuracy = accuracy_score(y_train, train_predictions)
print("Train Accuracy: %.2f%%" % (train_accuracy * 100.0))

