"""AdaBoost on the sklearn digits dataset.

Trains one AdaBoost model per K-fold split of the training portion,
evaluates each on a fixed held-out test set, then combines the per-fold
predictions by majority vote.
"""
from sklearn import datasets
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
# AdaBoost boosts a weak learner; the classic base is a decision tree.
from sklearn.tree import DecisionTreeClassifier as baseModel
from sklearn.ensemble import AdaBoostClassifier as Model
from sklearn.model_selection import KFold
# Alternative ensemble models kept for experimentation:
# from sklearn.linear_model import RidgeClassifier as Model
# from sklearn.svm import SVC as Model  # gamma=0.001
# NOTE: this import previously used "as baseModel", silently shadowing the
# decision tree above. MLPClassifier does not support sample_weight and so
# cannot be an AdaBoost base estimator; keep it importable but un-aliased.
from sklearn.neural_network import MLPClassifier  # noqa: F401

digits = datasets.load_digits()
print(digits.data.shape)

# Hold out 30% as a fixed test set; the remaining 70% feeds the K-fold loop.
x_train, x_test, y_train, y_test = train_test_split(
    digits.data, digits.target, test_size=0.3, random_state=33
)


kf = KFold(n_splits=10)

record = []  # per-fold accuracy on the held-out test set
A = []       # per-fold prediction vectors, combined by majority vote below
for trn_index, tst_index in kf.split(x_train):
    X, y = x_train[trn_index], y_train[trn_index]
    # Per-fold validation split; currently unused (evaluation is on x_test).
    Xt, yt = x_train[tst_index], y_train[tst_index]
    # For logistic regression or SVM bases, also pass algorithm='SAMME'.
    # BUG FIX: the base estimator must be an *instance*, not the class —
    # sklearn's clone() rejects a bare class. Passed positionally so the
    # call works both before and after the base_estimator -> estimator
    # keyword rename (the old keyword was removed in sklearn 1.4).
    model = Model(baseModel())
    model.fit(X, y)
    y_ = model.predict(x_test)
    acc = accuracy_score(y_test, y_)
    print("acc:%.3f%%" % (acc * 100))
    A.append(y_)
    record.append(acc)

import pandas as pd
import numpy as np

# One row per fold, one column per test sample.
df = pd.DataFrame(A)


# Majority vote across folds = column-wise mode of the stacked predictions.
y_pred = df.mode()
# BUG FIX: DataFrame.ix was removed from pandas; use .iloc for positional
# indexing. Row 0 of mode() holds the most frequent label per column.
accuracy = accuracy_score(y_test, y_pred.iloc[0, :])
print("accuracy:%.3f%%" % (accuracy * 100.0))
record.append(accuracy)


# Alternative aggregation: mean prediction rounded down to an int label.
# y_pred = df.mean().astype(int)
# accuracy = accuracy_score(y_test, y_pred)
# print("accuracy:%.3f%%" % (accuracy * 100.0))






