import numpy as np
import sklearn.datasets as datasets
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import collections as co

# 1. Load the bundled iris dataset.
iris = datasets.load_iris()

# 2. Pull out the sample matrices.
feature = iris['data']   # feature matrix, one row per flower
target = iris['target']  # class label per row

# 3. Hold out 20% of the samples as a test set (fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(
    feature, target, test_size=0.2, random_state=2021
)

# 4. Eyeball the training features to decide whether feature engineering is needed.
print(x_train)


def knn(k, predictpoint, x_train, y_train):
    """Classify one sample with a k-nearest-neighbours majority vote.

    Args:
        k: number of nearest neighbours to consult.
        predictpoint: 1-D feature vector of the sample to classify.
        x_train: 2-D array of training feature vectors (one row per sample).
        y_train: 1-D array of labels aligned with the rows of ``x_train``.

    Returns:
        The label occurring most often among the k nearest training samples
        (ties broken by first occurrence in distance order, as Counter does).
    """
    # Euclidean distance from the query point to every training row.
    # axis=1 collapses each row (x_i - predictpoint) to one scalar; this
    # replaces the hand-rolled square/sum/sqrt with the numpy idiom.
    distances = np.linalg.norm(x_train - predictpoint, axis=1)
    # argsort returns the *indices* of rows from nearest to farthest
    # (np.sort would discard the row identity we need to index labels).
    nearest_indices = np.argsort(distances)
    # Training labels reordered by increasing distance to the query point.
    sorted_labels = y_train[nearest_indices]
    # Majority vote over the k closest labels.
    return co.Counter(sorted_labels[:k]).most_common(1)[0][0]
if __name__ == '__main__':
    # Load the iris dataset and split it 70/30 (fixed seed for reproducibility).
    iris = load_iris()
    iris_feature = iris.data
    iris_label = iris.target
    k = 5
    x_train, x_test, y_train, y_test = train_test_split(
        iris_feature, iris_label, test_size=0.3, random_state=30
    )
    # Classify every test point with the k-NN classifier defined above.
    predict = np.array([knn(k, point, x_train, y_train) for point in x_test])
    print('模型的分类结果:', predict)
    print('真实的分类结果:', y_test)
    # Accuracy = fraction of predictions matching the ground truth.
    # The original summed (matches / size) element-wise — numerically the
    # same value, but written confusingly and accumulating one float
    # rounding per element; np.mean is the clear, direct form.
    print('正确率:', np.mean(predict == y_test))
