# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
# Use the iris dataset for testing
from sklearn.datasets import load_iris
# Used to split the dataset into training and test sets
from sklearn.model_selection import train_test_split
# Used to compute the accuracy of the classification predictions
from sklearn.metrics import accuracy_score

# ---- Data loading and preprocessing ----
# Load iris and view it as a labeled DataFrame for a quick sanity check.
iris = load_iris()
df = pd.DataFrame(data=iris.data, columns=iris.feature_names)
df['class'] = iris.target
# Replace the numeric labels with the species names for readability.
label_names = {idx: name for idx, name in enumerate(iris.target_names)}
df['class'] = df['class'].map(label_names)
df.describe()
# Feature matrix and labels as a column vector for the classifier.
x = iris.data
y = iris.target.reshape(-1, 1)
# Hold out 30% as a test set, stratified so class proportions are kept.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=35, stratify=y)
# Core algorithm implementation
# Distance function definitions
def l1_distance(a, b):
    """Manhattan (L1) distance from each row of `a` to the single point `b`.

    Broadcasting subtracts `b` from every row of the 2-D sample matrix `a`;
    the per-row absolute differences are summed into a 1-D distance array.
    """
    return np.abs(a - b).sum(axis=1)
def l2_distance(a, b):
    """Euclidean (L2) distance from each row of `a` to the single point `b`.

    Bug fix: the original computed ``np.sqrt(np.sum(a-b) ** 2, axis=2)``,
    which squares the *total* sum instead of squaring elementwise before
    summing, and passes an invalid ``axis`` keyword to the ``np.sqrt`` ufunc
    (a TypeError at call time). The square must be applied elementwise and
    the sum taken per row (axis=1), matching ``l1_distance``'s contract.
    """
    return np.sqrt(np.sum((a - b) ** 2, axis=1))
# Classifier implementation
class kNN(object):
    """A minimal k-nearest-neighbors classifier.

    Parameters
    ----------
    n_neibor : int
        Number of nearest neighbors used in the majority vote.
    dis_func : callable
        Distance function with signature ``(train_matrix, sample) -> 1-D
        distances`` (e.g. ``l1_distance`` or ``l2_distance``).
    """

    def __init__(self, n_neibor=1, dis_func=l1_distance):
        self.n_neibor = n_neibor
        self.dis_func = dis_func

    def fit(self, x, y):
        """Memorize the training data; kNN is a lazy learner, so no work
        happens here beyond storing `x` and `y`."""
        self.x_train = x
        self.y_train = y

    def predict(self, x):
        """Predict a class label for every row of `x`.

        Returns an array of shape ``(n_samples, 1)`` with the same dtype
        as the stored training labels. Labels are assumed to be
        non-negative integers (required by ``np.bincount``).
        """
        # Pre-allocate the prediction column vector.
        y_pred = np.zeros((x.shape[0], 1), dtype=self.y_train.dtype)
        for i, x_test in enumerate(x):
            # Bug fix: use the configured distance function instead of the
            # hard-coded l1_distance the original always called, so that
            # dis_func=l2_distance actually takes effect.
            distance = self.dis_func(self.x_train, x_test)
            # Indices of training samples ordered by increasing distance.
            nn_index = np.argsort(distance)
            # Labels of the k nearest neighbors, flattened to 1-D for bincount.
            nn_y = self.y_train[nn_index[:self.n_neibor]].ravel()
            # Majority vote: the most frequent label among the neighbors.
            y_pred[i] = np.argmax(np.bincount(nn_y))
        return y_pred

# ---- Evaluation ----
# Build a 3-nearest-neighbor classifier and fit it on the training split.
model = kNN(n_neibor=3)
model.fit(x_train, y_train)
# Predict labels for the held-out test split.
y_pre = model.predict(x_test)
# Compare predictions to the true labels to get the accuracy.
accuracy = accuracy_score(y_test, y_pre)

print("预测准确率", accuracy)