import math
from collections import Counter

def euclidean_distance(x1, x2):
    """Return the Euclidean (L2) distance between two equal-length vectors.

    Args:
        x1, x2: sequences of numbers with identical length.

    Returns:
        The distance as a float.

    Raises:
        ValueError: if the two vectors differ in length.
    """
    if len(x1) != len(x2):
        raise ValueError("两个样本的维度必须一致")
    total = 0.0
    for a, b in zip(x1, x2):
        diff = a - b
        total += diff * diff
    return math.sqrt(total)

def KNN(X_train, y_train, X_test, k=5, task="classification", distance_func=euclidean_distance):
    """Brute-force k-nearest-neighbours prediction.

    Args:
        X_train: sequence of training samples, each a sequence of numbers.
        y_train: sequence of labels aligned with ``X_train``.
        X_test: either one sample (a flat sequence of numbers) or a
            sequence of samples.
        k: number of neighbours to consult, ``1 <= k <= len(X_train)``.
        task: ``"classification"`` (majority vote over neighbour labels)
            or ``"regression"`` (mean of neighbour labels).
        distance_func: ``callable(a, b) -> float`` used to compare samples.

    Returns:
        A single prediction when ``X_test`` is one sample, otherwise a
        list of predictions. An empty ``X_test`` yields an empty list.

    Raises:
        ValueError: on mismatched train/label lengths, invalid ``k``,
            unknown ``task``, or inconsistent sample dimensions.
    """
    # 1. Validate the inputs.
    if len(X_train) != len(y_train):
        raise ValueError("训练样本特征与标签数量必须一致")
    if k < 1 or k > len(X_train):
        raise ValueError(f"k值必须满足 1 ≤ k ≤ 训练样本数（当前训练样本数：{len(X_train)}）")
    if task not in ("classification", "regression"):
        raise ValueError('task必须为 "classification" 或 "regression"')

    # Bug fix: an empty X_test used to crash with IndexError on X_test[0];
    # treat "no test samples" as "no predictions".
    if len(X_test) == 0:
        return []

    is_single_test = not isinstance(X_test[0], (list, tuple))
    if is_single_test:
        X_test = [X_test]

    # 2. Every sample must match the training dimensionality.
    # (Iterate the two sequences separately instead of concatenating —
    # `X_train + X_test` raised TypeError for tuple inputs.)
    n_features = len(X_train[0])
    for group in (X_train, X_test):
        for sample in group:
            if len(sample) != n_features:
                raise ValueError("所有训练样本和测试样本必须具有相同的维度")

    # 3. Predict each test sample.
    predictions = []
    for test_sample in X_test:
        distances = [
            (distance_func(test_sample, train_sample), train_label)
            for train_sample, train_label in zip(X_train, y_train)
        ]
        # Sort on the distance only; comparing labels on distance ties
        # could raise TypeError for non-comparable label types.
        distances.sort(key=lambda pair: pair[0])
        k_neighbor_labels = [label for _, label in distances[:k]]

        if task == "classification":
            # Majority vote; most_common breaks ties by first-seen order.
            predictions.append(Counter(k_neighbor_labels).most_common(1)[0][0])
        else:
            # Mean of the k nearest labels (len(k_neighbor_labels) == k,
            # guaranteed by the k <= len(X_train) check above).
            predictions.append(sum(k_neighbor_labels) / len(k_neighbor_labels))

    return predictions[0] if is_single_test else predictions