import numpy as np
from collections import Counter


class KNN:
    """k-nearest-neighbors classifier: Euclidean distance + majority vote."""

    def __init__(self, k=3):
        # k: number of neighbors consulted per prediction.
        self.k = k
        self.X_train = None
        self.y_train = None

    def fit(self, X_train, y_train):
        """Store the training data (KNN is a lazy learner — no work here).

        X_train: array-like of shape (n_samples, n_features)
        y_train: array-like of shape (n_samples,)
        """
        # Coerce to ndarrays so predict()'s vectorized arithmetic and fancy
        # indexing work even when callers pass plain Python lists.
        self.X_train = np.asarray(X_train)
        self.y_train = np.asarray(y_train)

    def predict(self, X_test):
        """Predict a label for each row of X_test.

        X_test: array-like of shape (n_samples, n_features)
        Returns: ndarray of shape (n_samples,) with the predicted labels.
        Raises: ValueError if called before fit().
        """
        if self.X_train is None or self.y_train is None:
            raise ValueError("predict() called before fit()")

        X_test = np.asarray(X_test)
        predictions = []
        for x in X_test:
            # Vectorized Euclidean distances from x to every training sample
            # (replaces a Python-level loop of per-row distance calls).
            distances = np.linalg.norm(self.X_train - x, axis=1)

            # Indices of the k nearest training samples. The [:self.k] slice
            # naturally clamps to the training-set size if k is larger.
            k_indices = np.argsort(distances)[: self.k]

            # Majority vote among the k nearest labels; ties break by first
            # occurrence in distance order, same as the original behavior.
            k_labels = self.y_train[k_indices].tolist()
            most_common = Counter(k_labels).most_common(1)
            predictions.append(most_common[0][0])

        return np.array(predictions)

    def _euclidean_distance(self, point1, point2):
        """Euclidean (L2) distance between two points.

        Kept for backward compatibility; predict() now vectorizes instead.
        """
        point1 = np.asarray(point1)
        point2 = np.asarray(point2)
        return np.sqrt(np.sum((point1 - point2) ** 2))


# Quick demo / smoke test of the classifier.
if __name__ == "__main__":
    # Toy dataset: four 2-D training points with binary labels.
    features = np.array([[1, 2], [2, 3], [3, 1], [6, 5]])
    labels = np.array([0, 0, 1, 1])

    # Points to classify.
    queries = np.array([[2, 2], [5, 4]])

    # Build and fit the model.
    model = KNN(k=3)
    model.fit(features, labels)

    # Run inference and report the results.
    predictions = model.predict(queries)
    print("预测结果:", predictions)

    # Accuracy against assumed ground-truth labels.
    y_test_true = np.array([0, 1])
    accuracy = np.mean(predictions == y_test_true)
    print(f"准确率: {accuracy:.2f}")