import pandas as pds
from numpy import *
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
import operator
import time


def get_data():
    """Load the iris dataset, standardize its features, and split train/test.

    Reads ``iris.csv`` from the working directory (the file has no header
    row, hence ``header=None``), scales the 4 feature columns, and performs
    a reproducible 80/20 split.

    Returns:
        train_data: 2-D array of standardized training features (80%).
        test_data: 2-D array of standardized test features (20%).
        train_labels: training labels transposed to shape (1, n_train),
            the layout expected by ``k_n_n``.
        test_labels: test labels as a (n_test, 1) column array.
    """
    # header=None because this csv file has no header row.
    csv_data = pds.read_csv(r"iris.csv", header=None)
    features = csv_data.iloc[:, 0:4].values   # first 4 columns: features
    labels = csv_data.iloc[:, 4:5].values     # last column: class label
    scaled = preprocessing.scale(features)    # standardize feature data
    # Split into training and test sets; fixed random_state for repeatability.
    train_data, test_data, train_labels, test_labels = train_test_split(
        scaled, labels, train_size=0.80, test_size=0.20, random_state=20)
    return train_data, test_data, train_labels.T, test_labels


def k_n_n(test_data, train_data, labels, k=5):
    """Classify a single sample with the k-nearest-neighbours rule.

    Args:
        test_data: 1-D array of features for the sample to classify.
        train_data: 2-D array (n_samples, n_features) of training features.
        labels: 2-D array of shape (1, n_samples) holding the training
            labels (the transposed column returned by ``get_data``).
        k: number of nearest neighbours that vote (default 5).

    Returns:
        The label occurring most often among the k nearest neighbours;
        ties are broken in favour of the label encountered first.
    """
    # Broadcasting subtracts test_data from every training row at once,
    # so no tile() copy of the test sample is needed.
    diff = train_data - test_data
    # Euclidean distance from the test sample to each training row.
    distance = (diff ** 2).sum(axis=1) ** 0.5
    nearest = distance.argsort()[:k]  # indices of the k closest rows
    label_list = labels.tolist()[0]
    votes = {}
    for idx in nearest:
        label = label_list[idx]
        # get(label, 0) returns 0 when the label has not been seen yet.
        votes[label] = votes.get(label, 0) + 1
    # max() returns the first maximal item (O(k)), which matches the
    # tie-breaking of the original stable sorted(..., reverse=True)[0].
    return max(votes.items(), key=operator.itemgetter(1))[0]


# Evaluate the classifier (default k) on the held-out 20% split and time it.
x_train, x_test, y_train, y_test = get_data()
correct = 0
start = time.time()
for idx, sample in enumerate(x_test):
    predicted = k_n_n(sample, x_train, y_train)
    if predicted == y_test[idx][0]:
        correct += 1
end = time.time()
accuracy = correct / len(x_test)
print("K值默认情况准确率为：" + str(round(accuracy, 3)))
print("K值默认情况下对20%的数据（共150个数据）进行分类大约共耗时：" + str(end - start) + "秒")
