import numpy as np
from collections import Counter


def euclidean_distance(x1, x2):
    """Return the Euclidean (L2) distance between vectors x1 and x2."""
    diff = x1 - x2
    return np.sqrt(np.sum(diff * diff))


class KNN:
    """k-nearest-neighbours classifier using Euclidean distance.

    Training simply stores the data; prediction finds the k closest
    training points to a query and returns the majority label among them.
    """

    def __init__(self, k=3):
        # Number of neighbours consulted for each prediction.
        self.k = k
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Store the training data.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)
            Training feature vectors.
        y : array-like of shape (n_samples,)
            Class label for each training sample.

        Inputs are converted with ``np.asarray`` so plain Python lists
        work, not just ndarrays (the distance arithmetic in
        ``_predict`` requires ndarray broadcasting).
        """
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)

    def predict(self, X):
        """Predict a class label for each row of X.

        Returns an ndarray of labels, one per input sample.
        """
        X = np.asarray(X)
        return np.array([self._predict(x) for x in X])

    def _predict(self, x):
        """Predict the label for a single sample (internal helper)."""
        # 1. Euclidean distance from x to every training point, computed
        # in one vectorized NumPy expression instead of a Python-level
        # loop over rows (same values, runs at C speed).
        distances = np.sqrt(np.sum((self.X_train - x) ** 2, axis=1))

        # 2. Indices of the k nearest neighbours. argsort is stable, so
        # equidistant neighbours keep their original order.
        k_indices = np.argsort(distances)[:self.k]

        # 3-4. Majority vote among the neighbours' labels; most_common
        # breaks ties by first occurrence (insertion order).
        most_common = Counter(self.y_train[k_indices]).most_common(1)
        return most_common[0][0]

# --- Usage example ---
if __name__ == '__main__':
    # Toy training set: each sample has two features.
    X_train = np.array([[1, 2], [1.5, 1.8], [5, 8], [8, 8], [1, 0.6], [9, 11]])
    # Class label for each training sample.
    y_train = np.array([0, 0, 1, 1, 0, 1])

    # Classifier that votes among the 3 nearest neighbours.
    clf = KNN(k=3)
    # "Training" a KNN model just stores the data.
    clf.fit(X_train, y_train)

    # Unseen points to classify.
    X_new = np.array([[2, 3], [7, 7], [0, 0]])
    predictions = clf.predict(X_new)

    # Report each point alongside its predicted class.
    for point, pred in zip(X_new, predictions):
        print(f"数据点 {point} 的预测类别是: {pred}")