# -*- coding: utf-8 -*-

# 导入必要的工具包
# 独立调用xgboost或在sklearn框架下调用均可。
# 1. 模型训练：超参数调优
#     1. 初步确定弱学习器数目： 20分
#     2. 对树的最大深度（可选）和min_children_weight进行调优（可选）：20分
#     3. 对正则参数进行调优：20分
#     4. 重新调整弱学习器数目：10分
#     5. 行列重采样参数调整：10分
# 2. 调用模型进行测试10分
# 3. 生成测试结果文件10分

import xgboost as xgb
from xgboost import XGBClassifier

from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold

from sklearn.metrics import log_loss
# 计算分类正确率
from sklearn.metrics import accuracy_score

import pandas as pd
import numpy as np

# Directory holding the feature-engineered CSVs produced by the upstream
# feature-engineering step.
dpath = './data/'

# Load the pre-processed training and test feature matrices.
train2 = pd.read_csv(dpath + 'RentListingInquries_FE_train.csv')
test_X = pd.read_csv(dpath + 'RentListingInquries_FE_test.csv')

# Split the training frame into target and features.
train2_y = train2["interest_level"]
train2_X = train2.drop("interest_level", axis=1)

# Hyper-parameters carried over from the earlier tuning rounds.
# NOTE(review): the original comment said round 2 of tuning gave
# n_estimators = 277, but the value actually used below is 98 —
# confirm which one is intended.
estimators = 98
# Result of the previous grid search: {'max_depth': 3, 'min_child_weight': 3}
max_depth4 = 3
min_child_weight4 = 3
# Regularisation terms from the regularisation-tuning round.
best_reg_alpha = 0.001
best_reg_lambda = 0.01
# Row/column subsampling rates from the resampling-tuning round.
best_subsample = 0.8
best_colsample = 0.8

# Final classifier assembled from the tuned hyper-parameters above.
# 'multi:softmax' makes predict() return class labels directly.
_xgb_params = dict(
    learning_rate=0.1,
    n_estimators=estimators,        # tuned weak-learner count
    max_depth=max_depth4,
    min_child_weight=min_child_weight4,
    reg_alpha=best_reg_alpha,
    reg_lambda=best_reg_lambda,
    gamma=0,
    subsample=best_subsample,       # row subsampling rate
    colsample_bytree=best_colsample,
    colsample_bylevel=best_colsample,
    objective='multi:softmax',
    nthread=-1,                     # use all available cores
    seed=3,                         # fixed seed for reproducibility
)
xgb9 = XGBClassifier(**_xgb_params)

# Fit the final model on the full training set, then predict class labels
# for both sets ('multi:softmax' makes predict() return labels directly).
gbm = xgb9.fit(train2_X, train2_y)
train2_predict = gbm.predict(train2_X)
test2_predict = gbm.predict(test_X)

# Report training accuracy — accuracy_score was imported for this purpose
# but the train predictions were previously computed and never used.
print("Train accuracy: %.4f" % accuracy_score(train2_y, train2_predict))
print(test2_predict)


# Attach the predicted labels to the test features and write the result file.
test_X["interest_level"] = test2_predict
print(test_X.head(5))
# BUG FIX: pass index=False so the output CSV does not gain a spurious
# unnamed index column.
test_X.to_csv(dpath + 'RentListingInquries_FE_test_predict.csv', index=False)

