import math
import heapq

def euclidean_distance(x1, x2):
    """Return the Euclidean (L2) distance between two equal-length samples.

    Args:
        x1: first sample, a sequence of numbers.
        x2: second sample, a sequence of numbers of the same length.

    Returns:
        float: sqrt(sum((a - b) ** 2 for a, b in zip(x1, x2))).

    Raises:
        ValueError: if the two samples have different lengths.
    """
    if len(x1) != len(x2):
        raise ValueError("两个样本的维度必须一致")
    # math.dist runs at C speed and scales its intermediates, so it does
    # not overflow where a naive sum of squares would (e.g. 1e200 inputs,
    # where (1e200) ** 2 raises OverflowError).
    return math.dist(x1, x2)

def knn_classify(train_data, train_labels, x, k=3, distance_func=None):
    """Classify sample *x* by majority vote among its k nearest neighbors.

    Args:
        train_data: sequence of training samples (each a sequence of numbers).
        train_labels: sequence of labels aligned with ``train_data``.
        x: the sample to classify; must match the training dimension.
        k: number of neighbors that vote; positive int <= len(train_data).
        distance_func: callable(a, b) -> float; defaults to
            ``euclidean_distance`` when None (sentinel avoids binding the
            helper at definition time).

    Returns:
        The label with the most votes among the k nearest neighbors. On a
        tie, the label first encountered in ascending-distance order wins
        (i.e. the nearer neighbor's label), matching dict insertion order.

    Raises:
        ValueError: on mismatched/empty inputs or an invalid ``k``.
    """
    if distance_func is None:
        distance_func = euclidean_distance
    if len(train_data) != len(train_labels):
        raise ValueError("训练数据与标签数量必须一致")
    # Check emptiness BEFORE comparing k to the dataset size, so an empty
    # dataset reports the right error (and train_data[0] below is safe).
    if len(train_data) == 0:
        raise ValueError("训练数据集不能为空")
    if not isinstance(k, int) or k <= 0:
        raise ValueError("k必须是正整数")
    if k > len(train_data):
        raise ValueError("k不能大于训练样本数量")
    # Validate that the query sample matches the training dimension.
    dim = len(train_data[0])
    if len(x) != dim:
        raise ValueError(f"待预测样本维度（{len(x)}）与训练样本维度（{dim}）不一致")
    distance_label = [
        (distance_func(x, train_x), label)
        for train_x, label in zip(train_data, train_labels)
    ]
    # heapq.nsmallest selects the k smallest distances in O(n log k) —
    # cheaper than a full sort for small k. (The previous comment wrongly
    # described a negated-distance nlargest trick.)
    k_nearest = heapq.nsmallest(k, distance_label, key=lambda item: item[0])
    # Count votes. Dicts preserve insertion order, so on a tie the label
    # seen first (belonging to a nearer neighbor) wins — same tie-break
    # as the original strict-greater argmax loop.
    label_count = {}
    for _dist, label in k_nearest:
        label_count[label] = label_count.get(label, 0) + 1
    # k >= 1 guarantees label_count is non-empty.
    return max(label_count, key=label_count.get)

class KNNClassifier:
    """Object-oriented wrapper around :func:`knn_classify`.

    KNN is a lazy learner: ``fit`` only memorizes the training set, and
    all work happens at prediction time.
    """

    def __init__(self, k=3, distance_func=euclidean_distance):
        # Hyper-parameters are stored as-is; k is validated inside
        # knn_classify when predict() runs.
        self.k = k
        self.distance_func = distance_func
        self.train_data = None
        self.train_labels = None

    def fit(self, train_data, train_labels):
        """Validate and memorize the training set."""
        n_samples, n_labels = len(train_data), len(train_labels)
        if n_samples != n_labels:
            raise ValueError("训练数据与标签数量必须一致")
        if n_samples == 0:
            raise ValueError("训练数据集不能为空")
        self.train_data = train_data
        self.train_labels = train_labels

    def predict(self, x):
        """Predict the label of one sample; requires a prior fit()."""
        if self.train_data is None or self.train_labels is None:
            raise RuntimeError("请先调用fit()方法训练模型")
        return knn_classify(
            self.train_data, self.train_labels, x, self.k, self.distance_func
        )

    def predict_batch(self, X):
        """Predict labels for every sample in X, preserving input order."""
        return [self.predict(sample) for sample in X]

if __name__ == "__main__":
    # Toy 2-D dataset: ten samples per class, three classes.
    train_data = [
        [5.1, 3.5], [4.9, 3.0], [4.7, 3.2], [4.6, 3.1], [5.0, 3.6],  # class 0
        [5.4, 3.9], [4.6, 3.4], [5.0, 3.4], [4.4, 2.9], [4.9, 3.1],  # class 0
        [6.0, 2.2], [5.8, 2.6], [5.6, 2.8], [5.9, 3.0], [5.5, 2.4],  # class 1
        [5.7, 2.8], [5.7, 2.6], [5.8, 2.7], [6.2, 2.9], [5.6, 2.2],  # class 1
        [6.3, 3.3], [5.8, 2.7], [7.1, 3.0], [6.3, 2.9], [6.5, 3.0],  # class 2
        [6.2, 3.4], [5.9, 3.0], [6.1, 3.0], [6.4, 2.8], [6.6, 3.0],  # class 2
    ]
    # Ten copies of each class label, in class order: [0]*10 + [1]*10 + [2]*10.
    train_labels = [cls for cls in (0, 1, 2) for _ in range(10)]
    # Query samples to classify.
    test_samples = [
        [5.0, 3.5],  # expected label 0
        [5.8, 2.7],  # expected label 1
        [6.4, 3.1],  # expected label 2
        [5.2, 2.8],  # expected label 1
    ]
    # Build and "train" the classifier with k=5, then predict in batch.
    classifier = KNNClassifier(k=5)
    classifier.fit(train_data, train_labels)
    predictions = classifier.predict_batch(test_samples)
    print("KNN分类预测结果：")
    for sample, predicted in zip(test_samples, predictions):
        print(f"样本{sample} -> 预测标签：{predicted}")