import pandas as pd

# Custom column names: 'label' is the wine class, a1..a13 are the 13 features.
names = ['label','a1','a2','a3','a4','a5','a6','a7','a8','a9','a10','a11','a12','a13']
# Read the data and apply our column names.
# BUG FIX: the original called pd.read_csv() with no arguments, which raises
# TypeError. The UCI wine data file has no header row, so pass header=None
# and the names list. NOTE(review): adjust the path to your environment.
dataset = pd.read_csv('wine.data', header=None, names=names)

print('葡萄酒原始数据集：')
print(dataset)

# Split off the feature values (columns a1..a13) and the label column.
data = dataset.iloc[range(0,178),range(1,14)]
target=dataset.iloc[range(0,178),range(0,1)]

# Use box plots to inspect how outliers are distributed in each feature.
import matplotlib.pyplot as plt

# BUG FIX: the 'seaborn-darkgrid' style was renamed to 'seaborn-v0_8-darkgrid'
# in matplotlib 3.6 and removed afterwards; try the old name first and fall
# back to the renamed one so the script works on both old and new versions.
try:
    plt.style.use('seaborn-darkgrid')
except OSError:
    plt.style.use('seaborn-v0_8-darkgrid')
plt.rcParams['axes.unicode_minus']=False  # render minus signs correctly
data.plot(kind='box',subplots=True,layout=(3,5),sharex=False,sharey=False)
# Find the outliers ('fliers' of each box plot) and print them per feature.
p=data.boxplot(return_type='dict')
for i in range(13):
    y=p['fliers'][i].get_ydata()
    print('a',i+1,'中异常值：',y)
plt.show()

# Reload the dataset (this section re-runs the loading step independently).
import pandas as pd
# Import the dataset with custom column names.
names = ['label','a1','a2','a3','a4','a5','a6','a7','a8','a9','a10','a11','a12','a13']
# BUG FIX: the original pd.read_csv() had no arguments (TypeError). Pass the
# file path, header=None (no header row) and the column names.
# NOTE(review): adjust the path to your environment.
dataset = pd.read_csv('wine.data', header=None, names=names)
data = dataset.iloc[range(0,178),range(1,14)]
target=dataset.iloc[range(0,178),range(0,1)]
print(data)
# Standardize features to zero mean / unit variance so KNN's distance
# computation is not dominated by features with large numeric scales.
from sklearn import preprocessing
cdata = preprocessing.StandardScaler().fit_transform(data)
print(cdata)
# Find the optimal K for KNN via 5-fold cross-validation.
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
# FIX: `target` is a single-column DataFrame; sklearn expects a 1-D label
# array for `y`, otherwise it emits DataConversionWarning. Flatten it here.
x, y = cdata, target.values.ravel()
x_train,x_test,y_train,y_test = train_test_split(x,y,random_state=0)

k_range = range(1,15)
k_error = []  # misclassification rate (1 - mean CV accuracy) for each k

for k in k_range:
    model = KNeighborsClassifier(n_neighbors=k)
    scores = cross_val_score(model, x, y, cv=5, scoring='accuracy')
    k_error.append(1 - scores.mean())

plt.rcParams['font.sans-serif'] = ['SimHei']  # display Chinese axis labels
plt.plot(k_range, k_error, 'r-')
plt.xlabel('k的取值')
plt.ylabel('预测误差率')
plt.show()
# Train the final model with the optimal K found above and evaluate it.
from sklearn.metrics import accuracy_score

# Fit a KNN classifier on the training split using k = 9
# (the value chosen from the error curve above).
model = KNeighborsClassifier(n_neighbors=9).fit(x_train, y_train)

# Score the fitted model against the held-out test split.
pred = model.predict(x_test)
ac = accuracy_score(y_test, pred)

print("模型预测准确率：", ac)
print("测试集的预测标签：", pred)
print("测试集的真实标签：", y_test)
