# Logistic regression
# from pandas import read_csv
# from sklearn.model_selection import ShuffleSplit
# from sklearn.linear_model import LogisticRegression
# from sklearn.model_selection import cross_val_score
# filename = 'pima_data.csv'
# names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
# data = read_csv(filename, names=names)
# array = data.values
# X = array[:, 0:8]
# Y = array[:, 8]
# n_splits = 10
# test_size = 0.33
# seed = 7
# kfold = ShuffleSplit(n_splits=n_splits, test_size=test_size, random_state=seed)
# model = LogisticRegression(multi_class='multinomial', max_iter=1100)
# result = cross_val_score(model, X, Y, cv=kfold)
# print("算法评估：%.3f%% (%.3f%%)" % (result.mean()*100, result.std()*100))

# Linear discriminant analysis (LDA)
# from pandas import read_csv
# from sklearn.model_selection import KFold
# from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# from sklearn.model_selection import cross_val_score
# filename = 'pima_data.csv'
# names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
# data = read_csv(filename, names=names)
# array = data.values
# X = array[:, 0:8]
# Y = array[:, 8]
# num_fold = 10
# seed = 7
# kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
# model = LinearDiscriminantAnalysis()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

# k-nearest neighbors: a sample is assigned to the class most common among its k nearest neighbors
# from pandas import read_csv
# from sklearn.model_selection import KFold
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.model_selection import cross_val_score
# filename = 'pima_data.csv'
# names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
# data = read_csv(filename, names=names)
# array = data.values
# X = array[:, 0:8]
# Y = array[:, 8]
# num_fold = 10
# seed = 7
# kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
# model = KNeighborsClassifier()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

# Naive Bayes classifier (probabilistic; Gaussian naive Bayes)
# from pandas import read_csv
# from sklearn.model_selection import KFold
# from sklearn.naive_bayes import GaussianNB
# from sklearn.model_selection import cross_val_score
# filename = 'pima_data.csv'
# names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
# data = read_csv(filename, names=names)
# array = data.values
# X = array[:, 0:8]
# Y = array[:, 8]
# num_fold = 10
# seed = 7
# kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
# model = GaussianNB()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

# Classification and regression tree (CART): decision tree split on the Gini index; tree growth followed by pruning
# from pandas import read_csv
# from sklearn.model_selection import KFold
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.model_selection import cross_val_score
# filename = 'pima_data.csv'
# names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
# data = read_csv(filename, names=names)
# array = data.values
# X = array[:, 0:8]
# Y = array[:, 8]
# num_fold = 10
# seed = 7
# kfold = KFold(n_splits=num_fold, random_state=seed, shuffle=True)
# model = DecisionTreeClassifier()
# result = cross_val_score(model, X, Y, cv=kfold)
# print(result.mean())

#SVM
from pandas import read_csv
from sklearn.model_selection import KFold
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score


def load_dataset(filename='pima_data.csv'):
    """Load the Pima Indians diabetes CSV and return (features, labels).

    The CSV has no header row; the first 8 columns are numeric features
    and the 9th column ('class') is the target label.
    """
    names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
    data = read_csv(filename, names=names)
    array = data.values
    return array[:, 0:8], array[:, 8]


def evaluate_svm(X, Y, n_splits=10, seed=7):
    """Return the per-fold cross-validation scores of a default SVC.

    Uses shuffled k-fold CV with a fixed random_state so results are
    reproducible across runs.
    """
    kfold = KFold(n_splits=n_splits, random_state=seed, shuffle=True)
    model = SVC()
    return cross_val_score(model, X, Y, cv=kfold)


# Guard the script entry point so importing this module does not
# trigger file I/O and model training as a side effect.
if __name__ == '__main__':
    X, Y = load_dataset()
    result = evaluate_svm(X, Y)
    print(result.mean())


