import numpy as np
from collections import Counter

def euclidean_distance(x1, x2):
    """Return the Euclidean (L2) distance between data points x1 and x2."""
    diff = x1 - x2
    return np.sqrt(np.sum(diff ** 2))

def manhattan_distance(x1, x2):
    """Return the Manhattan (L1) distance between data points x1 and x2."""
    return np.abs(x1 - x2).sum()

class KNN:
    """k-nearest-neighbors classifier with selectable distance metric.

    Parameters
    ----------
    k : int
        Number of neighbors consulted for the majority vote (default 3).
    distance_metric : str
        Either 'euclidean' (L2) or 'manhattan' (L1).

    Raises
    ------
    ValueError
        If ``distance_metric`` is not one of the supported names.
    """

    def __init__(self, k=3, distance_metric='euclidean'):
        self.k = k
        self.distance_metric = distance_metric
        # Training data is stored lazily by fit(); KNN does no actual training.
        self.X_train = None
        self.y_train = None

        # Resolve the metric name to a distance function up front so an
        # unsupported name fails fast at construction time, not at predict time.
        if distance_metric == 'euclidean':
            self.distance_func = self._euclidean
        elif distance_metric == 'manhattan':
            self.distance_func = self._manhattan
        else:
            raise ValueError("不支持的距离度量方法，请使用'euclidean'或'manhattan'")

    @staticmethod
    def _euclidean(x1, x2):
        # Euclidean (L2) distance between two points.
        return np.sqrt(np.sum((x1 - x2) ** 2))

    @staticmethod
    def _manhattan(x1, x2):
        # Manhattan (L1) distance between two points.
        return np.sum(np.abs(x1 - x2))

    def fit(self, X, y):
        """Memorize the training samples X and their labels y."""
        self.X_train = np.array(X)
        self.y_train = np.array(y)

    def predict(self, X):
        """Predict a label for each row of X.

        Returns a numpy array of predicted labels, one per input sample.
        """
        X = np.array(X)
        # Delegate to predict_single so the neighbor-search / voting logic
        # lives in exactly one place (it was previously duplicated here).
        return np.array([self.predict_single(x) for x in X])

    def predict_single(self, x):
        """Predict the label of a single sample x by majority vote.

        The vote is taken over the labels of the k training samples
        closest to x under the configured distance metric.
        """
        x = np.array(x)
        # Distance from x to every training sample.
        distances = [self.distance_func(x, x_train) for x_train in self.X_train]

        # Indices of the k nearest training samples.
        k_indices = np.argsort(distances)[:self.k]

        # Labels of those k samples.
        k_nearest_labels = self.y_train[k_indices]

        # Most frequent label among the neighbors wins.
        return Counter(k_nearest_labels).most_common(1)[0][0]