import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from os import environ
#from xgboost.sklearn import XGBClassifier
from sklearn import metrics   #Additional scklearn functions
#from sklearn.grid_search import GridSearchCV   #Perforing grid search
#
#import matplotlib.pylab as plt
#from matplotlib.pylab import rcParams
 
# Record the script start time so total runtime can be reported at the end.
import time 
start_time = time.time()
 
# Load the training and test datasets from CSV.
# NOTE(review): paths are relative to the current working directory — confirm both files exist there.
train = pd.read_csv("./train - no quintuple.csv")
tests = pd.read_csv("./test3_0_1_combined.csv") 
 
# Booster parameters for the native xgb.train() API.
# Fixes vs. the original:
#   - 'n_estimatores' removed: it was a typo for 'n_estimators', which is a
#     sklearn-wrapper parameter ignored by xgb.train() anyway — the number of
#     boosting rounds is controlled by num_rounds below.
#   - 'silent' replaced with 'verbosity' ('silent' was removed in XGBoost >= 1.0;
#     this script already uses the modern iteration_range predict API).
#   - duplicate learning-rate removed: 'learning_rate' is an alias of 'eta';
#     the original passed both (learning_rate=0.05, then eta=0.3) and the
#     later 'eta':0.3 took effect, so only 'eta':0.3 is kept.
params = {
    'booster': 'gbtree',
    'objective': 'multi:softmax',  # multiclass objective; predict() returns class labels directly
    'num_class': 2,                # number of classes (required with multi:softmax)
    'gamma': 0.2,                  # min loss reduction to make a split; larger = more conservative
    'scale_pos_weight': 180,       # positive/negative sample weight ratio
                                   # NOTE(review): only affects binary objectives — likely
                                   # a no-op under multi:softmax; confirm intent
    'max_depth': 6,                # tree depth; larger = higher overfitting risk
    'alpha': 1,                    # L1 regularization on weights
    'lambda': 1,                   # L2 regularization on weights; larger = more conservative
    'subsample': 0.7,              # row subsampling per tree
    'colsample_bytree': 0.7,       # column subsampling per tree
    'min_child_weight': 4,         # min sum of instance hessian in a leaf; for imbalanced 0-1
                                   # classification with h near 0.01, a value of 1 means a leaf
                                   # needs roughly 100 samples; smaller = easier to overfit
    'verbosity': 1,                # 0=silent, 1=warning, 2=info (replaces deprecated 'silent')
    'eta': 0.3,                    # learning rate (shrinkage per boosting step)
    'seed': 1000,                  # RNG seed for reproducibility
    'nthread': 7,                  # CPU threads
    # 'eval_metric': 'auc'
}
 
# xgb.train() accepts either the dict or a list of (key, value) pairs.
plst = list(params.items())
num_rounds = 10000  # upper bound on boosting rounds; early stopping trims this
 
# Hold out 30% of the training data for validation.
# random_state has a big influence on val-auc.
fit_part, holdout = train_test_split(train, test_size=0.3, random_state=1)

# Separate features from the 'label' target column in each split.
y = fit_part['label']
X = fit_part.drop(columns=['label'])
val_y = holdout['label']
val_X = holdout.drop(columns=['label'])
test_y = np.array(tests['label'])
test_x = tests.drop(columns=['label'])

# Wrap each split in the DMatrix format used by the native XGBoost API.
xgb_train = xgb.DMatrix(X, label=y)
xgb_val = xgb.DMatrix(val_X, label=val_y)
xgb_test = xgb.DMatrix(test_x)

# Evaluation sets monitored during training (train first, then validation).
watchlist = [(xgb_train, 'train'), (xgb_val, 'val')]
 
# Train with early stopping: when num_rounds is large, early_stopping_rounds
# halts training once the validation metric fails to improve for 100 rounds.
model = xgb.train(plst, xgb_train, num_rounds, evals=watchlist, early_stopping_rounds=100)

model.save_model('./xgb_040.json')  # persist the trained booster
print("best_iteration", model.best_iteration)  # label fixed: value printed is best_iteration
 
print("跑到这里了model.predict")
# Predict on the test set using only the trees up to the best iteration.
# iteration_range is half-open, so best_iteration + 1 is needed to include the
# best round (the original (0, best_iteration) silently dropped it).  The
# original also called model.predict(xgb_test, model.best_iteration), which
# passed the iteration count as the positional output_margin flag — a bug —
# and its result was never used; that call is removed.  test_y is already an
# ndarray (built with np.array above), so the redundant re-wrap is dropped too.
preds = model.predict(xgb_test, iteration_range=(0, model.best_iteration + 1))
# Write an Id,Label submission CSV (1-based ids); fmt='%d' renders the float class labels as ints.
np.savetxt('./xgb_submission050.csv',np.c_[range(1,len(tests)+1),preds],delimiter=',',header='Id,Label',comments='',fmt='%d')
def suppress_qt_warnings():
    """Silence Qt high-DPI scaling warnings by pinning the scale-factor env vars."""
    overrides = {
        "QT_DEVICE_PIXEL_RATIO": "0",
        "QT_AUTO_SCREEN_SCALE_FACTOR": "1",
        "QT_SCREEN_SCALE_FACTORS": "1",
        "QT_SCALE_FACTOR": "1",
    }
    for name, value in overrides.items():
        environ[name] = value


suppress_qt_warnings()
# Plot feature importances of the trained booster (requires matplotlib backend).
xgb.plot_importance(model)

# Test-set evaluation — these metric calls assume binary labels
# (use average='micro' variants for true multiclass).
# auc = metrics.roc_auc_score(test_y,preds) #ypreds,multi_class='ovr',average='micro'
# print('auc: %.4f' % auc)
recall = metrics.recall_score(test_y, preds)
print("Recall: %.4f" % recall)
f1_score = metrics.f1_score(test_y, preds)
print('F1-score: %.4f' % f1_score)
precision = metrics.precision_score(test_y, preds)
print('Precision: %.4f' % precision)  # fixed misspelled "Precesion" output label
acc = metrics.accuracy_score(test_y, preds)
print("acc: %.4f" % acc)

 
# Report total script runtime.
elapsed = time.time() - start_time
print("xgboost success!", '\n', "cost time:", elapsed, "(s)")