import numpy as np

# Load the wine dataset: each CSV row is "<class label>,<feature values...>".
input_file = 'F:/python学习资料/wine/wine.txt'
X = []
y = []
with open(input_file, 'r') as f:
    for line in f:
        # strip() instead of line[:-1]: the last line of a file often has no
        # trailing newline, and [:-1] would then drop its final character.
        line = line.strip()
        if not line:
            # Skip blank lines (e.g. a trailing empty line) defensively.
            continue
        data = line.split(",")
        X.append(data[1:])  # feature columns (everything after the label)
        y.append(data[0])   # class label is the first column

X = np.array(X).astype(float)
y = np.array(y)

from sklearn import model_selection

# Hold out 25% of the samples for evaluation; fixed seed for reproducibility.
X_train, X_test, y_train, y_test = model_selection.train_test_split(
    X, y, test_size=0.25, random_state=5)

# Train a decision-tree classifier on the training split.
from sklearn.tree import DecisionTreeClassifier

tree_model = DecisionTreeClassifier()
tree_model.fit(X_train, y_train)

# Evaluate: percentage of test samples whose predicted label matches.
predictions = tree_model.predict(X_test)
print("决策树")
n_correct = (y_test == predictions).sum()
accuracy_pct = 100 * n_correct / X_test.shape[0]
print("Accuracy of the classifier =", round(accuracy_pct, 2), '%')

from sklearn.metrics import confusion_matrix

# Rows are true classes, columns are predicted classes.
print(confusion_matrix(y_test, predictions))

# Logistic regression (逻辑回归)
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler

# Scale features to [0, 1] using training-set statistics only, then apply the
# same transform to the test set (avoids leaking test-set information).
scaled = MinMaxScaler()
scaled.fit(X_train)
X_train_scaled = scaled.transform(X_train)
X_test_scaled = scaled.transform(X_test)

classification_lregr = LogisticRegression()
classification_lregr.fit(X_train_scaled, y_train)
confusion_lregr = confusion_matrix(y_test, classification_lregr.predict(X_test_scaled))

print("#####逻辑回归#####")
# Round AFTER converting to a percentage — round(100*s, 2), not 100*round(s, 2) —
# so two decimal places of the percentage are kept, consistent with the
# decision-tree accuracy printout.
print("逻辑回归精度" + str(round(100 * classification_lregr.score(X_test_scaled, y_test), 2)) + '%')
print(confusion_lregr)

# Naive Bayes (贝叶斯) — trained on the unscaled training split.
from sklearn.naive_bayes import GaussianNB

classification_nb = GaussianNB()
classification_nb.fit(X_train, y_train)
print("####贝叶斯分类器#####")

# Round after scaling to a percentage (round(100*s, 2) rather than
# 100*round(s, 2)) so precision is not lost before the multiplication.
print("贝叶斯精度" + str(round(100 * classification_nb.score(X_test, y_test), 2)) + '%')
confusion_nb = confusion_matrix(y_test, classification_nb.predict(X_test))
print(confusion_nb)