import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from matplotlib.colors import ListedColormap

import matplotlib.pyplot as plt

plt.rcParams['font.sans-serif'] = ['Microsoft YaHei']
class KNN:
    """K-nearest-neighbors classifier / regressor using Euclidean distance.

    task='classification': majority vote among the k nearest training samples.
    task='regression': mean of the k nearest training targets.
    """

    def __init__(self, k=3, task='classification', label_num=None):
        # k: number of neighbors consulted per prediction.
        # task: 'classification' or 'regression'.
        # label_num: number of classes (classification only); inferred in fit() if None.
        self.k = k
        self.task = task
        self.label_num = label_num
        self.x_train = None
        self.y_train = None

    def fit(self, x_train, y_train):
        """Store the training data (KNN is a lazy learner).

        For classification with label_num=None, infers the class count
        from the distinct values in y_train. Returns self for chaining.
        """
        self.x_train = np.array(x_train)
        self.y_train = np.array(y_train)

        # Infer the number of classes when the caller did not specify it.
        if self.task == 'classification' and self.label_num is None:
            self.label_num = len(np.unique(y_train))

        return self

    def euclidean_distance(self, a, b):
        """Return the Euclidean (L2) distance between two vectors."""
        return np.sqrt(np.sum(np.square(a - b)))

    def get_knn_indices(self, x):
        """Return indices of the k training samples closest to x."""
        if self.x_train is None:
            raise ValueError("模型尚未训练，请先调用fit方法")

        # Vectorized distance to every training sample in one NumPy pass
        # (same values as calling euclidean_distance per row, but without
        # the Python-level loop).
        diffs = self.x_train - np.asarray(x)
        distances = np.sqrt(np.sum(np.square(diffs), axis=1))
        # Sort by distance and keep the k nearest indices.
        return np.argsort(distances)[:self.k]

    def get_label(self, x):
        """Classification only: majority label among the k nearest neighbors.

        Assumes labels are integers in [0, label_num). Ties break toward
        the smallest label (np.argmax picks the first maximum).
        """
        if self.task != 'classification':
            raise ValueError("此方法仅适用于分类任务")

        knn_indices = self.get_knn_indices(x)

        # Vote count per class.
        label_statistic = np.zeros(shape=[self.label_num])
        for index in knn_indices:
            label = int(self.y_train[index])
            label_statistic[label] += 1

        # Return the most frequent class.
        return np.argmax(label_statistic)

    def predict_single(self, x):
        """Predict one sample: majority vote (classification) or mean (regression).

        Note: classification ties here break toward the label seen first
        among the nearest neighbors (Counter insertion order), which may
        differ from get_label's smallest-label tie-break.
        """
        knn_indices = self.get_knn_indices(x)

        if self.task == 'classification':
            # Majority vote among neighbor labels.
            neighbor_labels = [self.y_train[idx] for idx in knn_indices]
            most_common = Counter(neighbor_labels).most_common(1)
            return most_common[0][0]
        else:
            # Regression: average the neighbor targets.
            neighbor_values = [self.y_train[idx] for idx in knn_indices]
            return np.mean(neighbor_values)

    def predict(self, x_test):
        """Predict every row of x_test; returns a float ndarray of predictions."""
        x_test = np.array(x_test)
        predicted_labels = np.zeros(shape=[len(x_test)])

        for i, x in enumerate(x_test):
            predicted_labels[i] = self.predict_single(x)

        return predicted_labels

    def score(self, x_test, y_test):
        """Evaluate on a test set.

        Classification: accuracy (higher is better).
        Regression: mean squared error (lower is better).
        """
        y_pred = self.predict(x_test)
        y_true = np.array(y_test)

        if self.task == 'classification':
            return np.mean(y_pred == y_true)
        else:
            return np.mean(np.square(y_pred - y_true))


# 测试MNIST数据集
def test_mnist():
    """Demo: KNN classification on synthetic MNIST-like data, sweeping k=1..9.

    Prints per-k accuracy, plots accuracy vs. k, and returns the accuracy list.
    (Labels are random, so accuracy hovers near chance.)
    """
    print("=== MNIST手写数字分类测试 ===")
    np.random.seed(42)
    n_samples = 1000
    n_features = 784  # 28x28 pixels

    # Synthesize random "MNIST" data with an 80/20 train/test split.
    n_train = int(n_samples * 0.8)
    n_eval = int(n_samples * 0.2)
    x_train = np.random.randn(n_train, n_features)
    y_train = np.random.randint(0, 10, n_train)
    x_test = np.random.randn(n_eval, n_features)
    y_test = np.random.randint(0, 10, n_eval)

    print(f"训练集大小: {len(x_train)}")
    print(f"测试集大小: {len(x_test)}")

    # Evaluate one model per candidate k.
    k_values = range(1, 10)
    accuracies = []

    for k in k_values:
        model = KNN(k=k, task='classification', label_num=10)
        model.fit(x_train, y_train)
        acc = model.score(x_test, y_test)
        accuracies.append(acc)
        print(f'K的取值为 {k}, 预测准确率为 {acc * 100:.1f}%')

    # Plot accuracy as a function of k.
    plt.figure(figsize=(10, 6))
    plt.plot(k_values, accuracies, 'bo-', linewidth=2, markersize=8)
    plt.xlabel('K值')
    plt.ylabel('准确率')
    plt.title('MNIST数据集上KNN不同K值的准确率')
    plt.grid(True, alpha=0.3)
    plt.show()

    return accuracies
def test_gaussian():
    """Demo: build and plot a two-class 2-D Gaussian dataset.

    Returns (x_data, y_data) where y_data holds labels 0.0 / 1.0.
    """
    print("\n=== 高斯数据集分类测试 ===")

    np.random.seed(42)
    n_samples = 200
    half = n_samples // 2

    # Two Gaussian blobs with distinct means and covariances.
    mean1, cov1 = [2, 2], [[1, 0.5], [0.5, 1]]
    mean2, cov2 = [-2, -2], [[1, -0.3], [-0.3, 1]]
    class1 = np.random.multivariate_normal(mean1, cov1, half)
    class2 = np.random.multivariate_normal(mean2, cov2, half)

    # Stack samples; the first half is class 0, the second half class 1.
    x_data = np.vstack([class1, class2])
    y_data = np.hstack([np.zeros(half), np.ones(half)])

    # Scatter-plot the raw data, one color/marker per class.
    plt.figure(figsize=(8, 6))
    mask0 = y_data == 0
    mask1 = y_data == 1
    plt.scatter(x_data[mask0, 0], x_data[mask0, 1], c='blue', marker='o', label='Class 0', alpha=0.7)
    plt.scatter(x_data[mask1, 0], x_data[mask1, 1], c='red', marker='x', label='Class 1', alpha=0.7)
    plt.xlabel('X axis')
    plt.ylabel('Y axis')
    plt.title('高斯数据集分布')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.show()

    return x_data, y_data


def visualize_decision_boundary(x_data, y_data, k_values=(1, 3, 10)):
    """Plot KNN decision boundaries on 2-D binary-labeled data, one subplot per k.

    x_data: (n, 2) array of points; y_data: labels 0/1.
    k_values: iterable of k values to compare (default: 1, 3, 10).
    """
    # Build an evaluation grid covering the data with a 1-unit margin.
    step = 0.1
    x_min, x_max = x_data[:, 0].min() - 1, x_data[:, 0].max() + 1
    y_min, y_max = x_data[:, 1].min() - 1, x_data[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
                         np.arange(y_min, y_max, step))
    grid_data = np.c_[xx.ravel(), yy.ravel()]

    # squeeze=False keeps `axes` 2-D, so indexing works even when
    # len(k_values) == 1 (plt.subplots would otherwise return a bare Axes).
    fig, axes = plt.subplots(1, len(k_values), figsize=(15, 5), squeeze=False)
    cmap_light = ListedColormap(['lightblue', 'lightcoral'])

    for i, k in enumerate(k_values):
        # Fit a KNN model on the full dataset for this k.
        knn = KNN(k=k, task='classification', label_num=2)
        knn.fit(x_data, y_data)

        # Classify every grid point to shade the decision regions.
        z = knn.predict(grid_data)
        z = z.reshape(xx.shape)

        # Draw the shaded regions plus the original samples.
        ax = axes[0, i]
        ax.pcolormesh(xx, yy, z, cmap=cmap_light, alpha=0.8)
        ax.scatter(x_data[y_data == 0, 0], x_data[y_data == 0, 1],
                   c='blue', marker='o', label='Class 0', alpha=0.7)
        ax.scatter(x_data[y_data == 1, 0], x_data[y_data == 1, 1],
                   c='red', marker='x', label='Class 1', alpha=0.7)

        ax.set_xlabel('X axis')
        ax.set_ylabel('Y axis')
        ax.set_title(f'K = {k}')
        ax.legend()
        ax.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

def test_regression():
    """Demo: KNN regression on a noisy sine curve; prints MSE and plots results."""
    print("\n=== KNN回归任务测试 ===")

    # Noisy samples of sin(x) over [0, 10].
    np.random.seed(42)
    xs = np.linspace(0, 10, 100)
    ys = np.sin(xs) + 0.1 * np.random.randn(100)

    # Sequential 80/20 train/test split; features reshaped to column vectors.
    n_train = int(0.8 * len(xs))
    x_train = xs[:n_train].reshape(-1, 1)
    x_test = xs[n_train:].reshape(-1, 1)
    y_train, y_test = ys[:n_train], ys[n_train:]

    # Fit and evaluate a 5-neighbor regressor.
    model = KNN(k=5, task='regression')
    model.fit(x_train, y_train)
    y_pred = model.predict(x_test)

    mse = model.score(x_test, y_test)
    print(f"回归任务均方误差: {mse:.4f}")

    # Plot training data, test data, and predictions together.
    plt.figure(figsize=(10, 6))
    plt.scatter(x_train, y_train, c='blue', alpha=0.6, label='训练数据')
    plt.scatter(x_test, y_test, c='green', alpha=0.6, label='测试数据')
    plt.scatter(x_test, y_pred, c='red', alpha=0.8, label='预测值')
    plt.xlabel('X')
    plt.ylabel('y')
    plt.title('KNN回归任务结果')
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.show()


if __name__ == "__main__":
    # 运行所有测试
    print("K近邻算法完整实现")
    print("=" * 50)

    # 1. MNIST分类测试
    mnist_accuracies = test_mnist()

    # 2. 高斯数据集测试
    x_gauss, y_gauss = test_gaussian()

    # 3. 可视化决策边界
    print("\n=== 决策边界可视化 ===")
    visualize_decision_boundary(x_gauss, y_gauss)

    # 4. 回归任务测试
    test_regression()

    # 总结
    print("\n=== 算法总结 ===")
    print("KNN算法特点:")
    print("1. 简单直观，易于理解和实现")
    print("2. 无需训练过程，但预测时计算复杂度高")
    print("3. 对异常值敏感，需要合适的K值选择")
    print("4. 适用于小到中等规模的数据集")