from sklearn.datasets import load_wine
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import matplotlib as mpl


def getK(x, y):
    """Plot the 10-fold cross-validation error of a KNN classifier for k = 1..30.

    Parameters:
        x: feature matrix, shape (n_samples, n_features).
        y: class labels, shape (n_samples,).

    Shows a matplotlib figure with k on the x-axis and mean CV error
    (1 - mean accuracy) on the y-axis; returns None.
    """
    k_range = range(1, 31)
    k_error = []
    # Try every k from 1 to 30 and record the cross-validated error.
    for k in k_range:
        knn = KNeighborsClassifier(n_neighbors=k)
        # cv=10 -> 10-fold cross-validation (each fold is a 9:1 train/test split).
        scores = cross_val_score(knn, x, y, cv=10, scoring='accuracy')
        k_error.append(1 - scores.mean())
    # Plot: x-axis is the k value, y-axis is the error.
    plt.plot(k_range, k_error)
    plt.xlabel('Value of K for KNN')
    plt.ylabel('Error')
    plt.show()


def KNNDemo():
    """KNN demo on the wine dataset.

    Takes the first 5 samples of each class as the test set, trains a
    7-nearest-neighbors classifier on the remaining samples, and prints
    the intermediate splits, the predictions, and the error rate in
    percent. Also calls getK() to plot CV error versus k. Returns None.
    """
    data_struct = load_wine()
    data = data_struct['data']
    target = data_struct['target']

    data_learn = []
    target_learn = []
    data_test = []
    target_test = []
    remaining = {}  # per-class countdown of test samples still to take
    getK(data, target)
    for i in range(len(target)):
        label = target[i]
        if label not in remaining:
            # First sample of this class: take it for the test set and
            # allow 4 more (5 test samples per class in total).
            remaining[label] = 4
            target_test.append(label)
            data_test.append(data[i])
        elif remaining[label] > 0:
            remaining[label] -= 1
            target_test.append(label)
            data_test.append(data[i])
        else:
            target_learn.append(label)
            data_learn.append(data[i])
    print(data_learn)
    print("=====================================")
    print(target_learn)
    print("=====================================")
    print(data_test)
    print("=====================================")
    print(target_test)
    clf = KNeighborsClassifier(n_neighbors=7)
    clf.fit(data_learn, target_learn)
    z = clf.predict(data_test)
    print(z)
    # Count misclassified test samples.
    err = sum(1 for pred, true in zip(z, target_test) if pred != true)
    f = err * 100 / len(target_test)
    # Fixed typo in the label: 错误绿 ("error green") -> 错误率 ("error rate").
    print("错误率： ", f)
