# 导入必要的工具包
from xgboost import XGBClassifier
import xgboost as xgb

import pandas as pd
import numpy as np

from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold

from sklearn.metrics import log_loss

from matplotlib import pyplot
import seaborn as sns

# Load the feature-engineered training data and take a quick look at it.
data = pd.read_csv('RentListingInquries_FE_train.csv')
print(data.info())
print(data.head())
# The class distribution is not balanced across interest levels:
# sns.countplot(data.interest_level)

# Split the label column off from the feature matrix.
y_train = data['interest_level']
train = data.drop(["interest_level"], axis=1)
X_train = np.array(train)

# Stratified 5-fold CV preserves the class ratios in every fold.
kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=33)

# 40k+ training rows make tuning slow, so performance is estimated on a
# stratified 10% subsample produced by train_test_split.
from sklearn.model_selection import train_test_split
X_train_part, X_val, y_train_part, y_val = train_test_split(
    X_train,
    y_train,
    train_size=0.1,
    random_state=33,
    stratify=y_train,
)

def modelfit(alg, X_train, y_train, cv_folds=None, early_stopping_rounds=50,
             csv_path='1_nestimators.csv'):
    """Cross-validate an XGBClassifier with early stopping and return the
    best number of boosting rounds.

    Parameters
    ----------
    alg : XGBClassifier
        Configured sklearn-wrapper estimator; its params seed ``xgb.cv``.
    X_train, y_train : array-like
        Training features and integer-encoded class labels.
    cv_folds : cross-validation generator, optional
        Passed through to ``xgb.cv`` as ``folds``.
    early_stopping_rounds : int
        Stop boosting when the CV mlogloss has not improved for this many rounds.
    csv_path : str
        Where the per-round CV history is written (default keeps the
        original behaviour).

    Returns
    -------
    int
        The best ``n_estimators`` found by early stopping.
    """
    xgb_param = alg.get_xgb_params()
    # The native API needs the class count for multi:softprob; derive it
    # from the labels instead of hard-coding 3 (same value for this dataset).
    xgb_param['num_class'] = len(np.unique(y_train))

    # Call the native xgboost API directly (not the sklearn wrapper) so that
    # xgb.cv can run early stopping over the boosting rounds.
    xgtrain = xgb.DMatrix(X_train, label=y_train)

    cvresult = xgb.cv(xgb_param, xgtrain,
                      num_boost_round=alg.get_params()['n_estimators'],
                      folds=cv_folds,
                      metrics='mlogloss',
                      early_stopping_rounds=early_stopping_rounds)

    # Persist the per-round CV history for later plotting/inspection.
    cvresult.to_csv(csv_path, index_label='n_estimators')

    # With early stopping, xgb.cv truncates its result to the best round,
    # so the row count IS the optimal n_estimators.
    n_estimators = cvresult.shape[0]
    print('best n_estimators is ', n_estimators)
    return n_estimators
#params = {"objective": "multi:softprob", "eval_metric":"mlogloss", "num_class": 9}
# max_depth=5, min_child_weight=1 were the initial values; after the grid
# search below settled on max_depth=3, min_child_weight=2, the best values
# were substituted back in here.
# After tuning max_depth/min_child_weight, n_estimators is re-tuned.
best_max_depth = 1
best_min_child_weight = 3
bestn_estimators = 1000
best_subsample = 0.2
best_colsample_bytree = 0.7
best_colsample_bylevel = 0.7
xgb1 = XGBClassifier(
        learning_rate=0.1,
        n_estimators=1000,  # a large cap is fine: xgb.cv early-stops to the right value
        max_depth=best_max_depth,
        min_child_weight=best_min_child_weight,
        gamma=0,
        subsample=best_subsample,
        colsample_bytree=best_colsample_bytree,
        colsample_bylevel=best_colsample_bylevel,
        objective='multi:softprob',
        seed=33)

bestn_estimators = modelfit(xgb1, X_train_part, y_train_part, cv_folds=kfold)
# BUG FIX: pd.DataFrame.from_csv was deprecated in pandas 0.21 and removed
# in pandas 1.0; pd.read_csv with index_col=0 is the documented replacement.
cvresult = pd.read_csv('1_nestimators.csv', index_col=0)
#
# # plot
# test_means = cvresult['test-mlogloss-mean']
# test_stds = cvresult['test-mlogloss-std']
#
# train_means = cvresult['train-mlogloss-mean']
# train_stds = cvresult['train-mlogloss-std']
#
# x_axis = range(0, cvresult.shape[0])
#
# pyplot.errorbar(x_axis, test_means, yerr=test_stds, label='Test')
# pyplot.errorbar(x_axis, train_means, yerr=train_stds, label='Train')
# pyplot.title("XGBoost n_estimators vs Log Loss")
# pyplot.xlabel('n_estimators')
# pyplot.ylabel('Log Loss')
# pyplot.savefig('n_estimators4_1.png')

# BUG FIX: pd.DataFrame.from_csv no longer exists in current pandas;
# re-read the CV history with pd.read_csv instead.
cvresult = pd.read_csv('1_nestimators.csv', index_col=0)

# Tune max_depth and min_child_weight.
# Guidance: max_depth typically 3-10; min_child_weight ~ 1/sqrt(ratio_rare_event) = 5.5.
# max_depth and min_child_weight were already fine-tuned; only the final
# refinement ranges are kept here, the intermediate sweeps are omitted.
max_depth = range(1, 3, 1)
min_child_weight = range(2, 4, 1)
param_test2_1 = dict(max_depth=max_depth, min_child_weight=min_child_weight)

xgb2_1 = XGBClassifier(
        learning_rate=0.1,
        n_estimators=bestn_estimators,  # best n_estimators found by the cv step above
        max_depth=best_max_depth,
        min_child_weight=best_min_child_weight,
        gamma=0,
        subsample=0.2,
        colsample_bytree=0.7,
        colsample_bylevel=0.7,
        objective='multi:softprob',
        seed=33)

# BUG FIX: since scikit-learn 0.21 GridSearchCV defaults return_train_score
# to False, so the 'mean_train_score'/'std_train_score' lookups below would
# raise KeyError without return_train_score=True.
gsearch2_1 = GridSearchCV(xgb2_1, param_grid=param_test2_1, scoring='neg_log_loss',
                          cv=kfold, return_train_score=True)
gsearch2_1.fit(X_train_part, y_train_part)

# summarize results
print("Best: %f using %s" % (gsearch2_1.best_score_, gsearch2_1.best_params_))
test_means = gsearch2_1.cv_results_['mean_test_score']
test_stds = gsearch2_1.cv_results_['std_test_score']
train_means = gsearch2_1.cv_results_['mean_train_score']
train_stds = gsearch2_1.cv_results_['std_train_score']

pd.DataFrame(gsearch2_1.cv_results_).to_csv('my_preds_maxdepth_min_child_weights_1.csv')
#
# # plot results
# test_scores = np.array(test_means).reshape(len(max_depth), len(min_child_weight))
# train_scores = np.array(train_means).reshape(len(max_depth), len(min_child_weight))
#
# for i, value in enumerate(max_depth):
#     pyplot.plot(min_child_weight, -test_scores[i], label='test_max_depth:' + str(value))
# # for i, value in enumerate(min_child_weight):
# #    pyplot.plot(max_depth, train_scores[i], label= 'train_min_child_weight:'   + str(value))
#
# pyplot.legend()
# pyplot.xlabel('max_depth')
# pyplot.ylabel('Log Loss')
# pyplot.savefig('max_depth_vs_min_child_weght_1.png')
# Tune the row/column sampling parameters; the search below settled on
# subsample=0.2, colsample_bytree=0.7.
subsample = [n / 10.0 for n in (1, 2)]
colsample_bytree = [n / 10.0 for n in (6, 7)]
colsample_bylevel = [n / 10.0 for n in (6, 7)]
param_test3_1 = {
    'subsample': subsample,
    'colsample_bytree': colsample_bytree,
    'colsample_bylevel': colsample_bylevel,
}

xgb3_1 = XGBClassifier(
        learning_rate=0.1,
        n_estimators=bestn_estimators,  # best n_estimators from the second tuning round
        max_depth=best_max_depth,
        min_child_weight=best_min_child_weight,
        gamma=0,
        subsample=0.2,
        colsample_bytree=0.7,
        colsample_bylevel=0.7,
        objective='multi:softprob',
        seed=33)

# BUG FIX: return_train_score=True is required on scikit-learn >= 0.21,
# otherwise cv_results_ has no 'mean_train_score'/'std_train_score' keys
# and the lookups below raise KeyError.
gsearch3_1 = GridSearchCV(xgb3_1, param_grid=param_test3_1, scoring='neg_log_loss',
                          cv=kfold, return_train_score=True)
# NOTE(review): this search fits on the full X_train while the others use
# the 10% X_train_part subsample — confirm this asymmetry is intentional.
gsearch3_1.fit(X_train, y_train)

# summarize results
print("Best: %f using %s" % (gsearch3_1.best_score_, gsearch3_1.best_params_))
test_means = gsearch3_1.cv_results_['mean_test_score']
test_stds = gsearch3_1.cv_results_['std_test_score']
train_means = gsearch3_1.cv_results_['mean_train_score']
train_stds = gsearch3_1.cv_results_['std_train_score']

pd.DataFrame(gsearch3_1.cv_results_).to_csv('my_preds_subsampleh_colsample_bytree_1.csv')

# # plot results
# test_scores = np.array(test_means).reshape(len(colsample_bytree), len(subsample))
# train_scores = np.array(train_means).reshape(len(colsample_bytree), len(subsample))
#
# for i, value in enumerate(colsample_bytree):
#     pyplot.plot(subsample, -test_scores[i], label='test_colsample_bytree:' + str(value))
# # for i, value in enumerate(min_child_weight):
# #    pyplot.plot(max_depth, train_scores[i], label= 'train_min_child_weight:'   + str(value))
#
# pyplot.legend()
# pyplot.xlabel('subsample')
# pyplot.ylabel('Log Loss')
# pyplot.savefig('subsample_vs_colsample_bytree1.png')
# Tune the regularization parameters reg_alpha and reg_lambda.
reg_alpha = [0.1, 0.5]   # default = 0; earlier sweeps tried 0.1, 1, 1.5, 2
reg_lambda = [0.1, 0.5]  # default = 1; earlier sweeps tried 0.1, 0.5, 1, 2

param_test5_1 = {'reg_alpha': reg_alpha, 'reg_lambda': reg_lambda}
xgb5_1 = XGBClassifier(
        learning_rate=0.1,
        n_estimators=bestn_estimators,  # best n_estimators from the second tuning round
        max_depth=best_max_depth,
        min_child_weight=best_min_child_weight,
        gamma=0,
        subsample=best_subsample,
        colsample_bytree=best_colsample_bytree,
        colsample_bylevel=best_colsample_bylevel,
        objective='multi:softprob',
        seed=33)

# BUG FIX: return_train_score=True is required on scikit-learn >= 0.21;
# without it the 'mean_train_score'/'std_train_score' lookups below raise
# KeyError (and here the plotting code that follows is live, not commented).
gsearch5_1 = GridSearchCV(xgb5_1, param_grid=param_test5_1, scoring='neg_log_loss',
                          cv=kfold, return_train_score=True)
gsearch5_1.fit(X_train, y_train)

# summarize results
print("Best: %f using %s" % (gsearch5_1.best_score_, gsearch5_1.best_params_))
test_means = gsearch5_1.cv_results_['mean_test_score']
test_stds = gsearch5_1.cv_results_['std_test_score']
train_means = gsearch5_1.cv_results_['mean_train_score']
train_stds = gsearch5_1.cv_results_['std_train_score']

pd.DataFrame(gsearch5_1.cv_results_).to_csv('my_preds_reg_alpha_reg_lambda_1.csv')

# plot results: one curve of -log-loss vs reg_lambda per reg_alpha value
test_scores = np.array(test_means).reshape(len(reg_alpha), len(reg_lambda))
train_scores = np.array(train_means).reshape(len(reg_alpha), len(reg_lambda))

# log_reg_alpha = [0,0,0,0]
# for index in range(len(reg_alpha)):
#   log_reg_alpha[index] = math.log10(reg_alpha[index])

for i, value in enumerate(reg_alpha):
    pyplot.plot(reg_lambda, -test_scores[i], label='reg_alpha:' + str(value))
# for i, value in enumerate(min_child_weight):
#    pyplot.plot(max_depth, train_scores[i], label= 'train_min_child_weight:'   + str(value))

pyplot.legend()
pyplot.xlabel('reg_alpha')
pyplot.ylabel('-Log Loss')
pyplot.savefig('reg_alpha_vs_reg_lambda1.png')

# Score the held-out test set with the best estimator from the final grid
# search, then persist the predictions alongside the features.
data_test = pd.read_csv('RentListingInquries_FE_test.csv')
X_test = np.array(data_test)
predictions = gsearch5_1.predict(X_test)
data_test['interest_level'] = predictions
data_test.to_csv('data_test_predict.csv')


pyplot.show()