from numpy import loadtxt
# XGBoost classifier model
from xgboost import XGBClassifier
# GridSearchCV is used to find the best learning rate
from sklearn.model_selection import GridSearchCV
# Scoring metric
from sklearn.metrics import accuracy_score
# Stratified K-fold cross-validation
from sklearn.model_selection import StratifiedKFold

def _load_dataset(path):
    """Load the CSV at *path*.

    The first eight columns are the features; column 8 is the binary label.
    Returns the pair (X, Y) as numpy arrays.
    """
    data = loadtxt(path, delimiter=",")
    return data[:, 0:8], data[:, 8]


def _search_learning_rate(X, Y):
    """Grid-search XGBoost's learning rate with stratified 10-fold CV.

    Returns the fitted GridSearchCV object (best_score_, best_params_,
    cv_results_ are available on it).
    """
    model = XGBClassifier()
    param_grid = {"learning_rate": [0.0001, 0.001, 0.01, 0.1, 0.2, 0.3]}
    # Stratified folds keep the class ratio stable in every split;
    # a fixed random_state makes the search reproducible.
    kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=7)
    # neg_log_loss rewards well-calibrated probabilities, not just accuracy.
    grid_search = GridSearchCV(model, param_grid, scoring="neg_log_loss",
                               n_jobs=-1, cv=kfold)
    return grid_search.fit(X, Y)


if __name__ == '__main__':
    X, Y = _load_dataset('diabetes.csv')
    grid_result = _search_learning_rate(X, Y)

    # Report the winning learning rate, then the mean CV score of every candidate.
    print("Best:%f using %s" % (grid_result.best_score_, grid_result.best_params_))

    means = grid_result.cv_results_['mean_test_score']
    params = grid_result.cv_results_['params']
    for mean, param in zip(means, params):
        print("%f with %r" % (mean, param))
