import numpy as np
from collections import Counter
import matplotlib.pyplot as plt

# Use a CJK-capable font so the Chinese axis labels/titles render (SimHei must be installed)
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering correctly with a non-ASCII font


class KNN:
    """K-nearest-neighbors classifier using brute-force Euclidean search.

    `fit` simply stores the training data; all work happens at prediction
    time: distances to every training sample are computed, the k closest
    labels are collected, and the majority label wins.
    """

    def __init__(self, k=5):
        """
        Initialize the classifier.

        Args:
            k: number of neighbors to vote (default 5). If k exceeds the
               training-set size, all training samples are used.
        """
        self.k = k
        self.X_train = None  # set by fit()
        self.y_train = None  # set by fit()

    def fit(self, X, y):
        """
        "Train" the model — KNN is lazy, so this just stores the data.

        Args:
            X: training feature matrix, shape (n_samples, n_features)
            y: training labels, shape (n_samples,)
        """
        # Coerce to arrays so vectorized distance math works even if the
        # caller passes plain lists.
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)

    def _calculate_distance(self, x1, x2):
        """
        Return the Euclidean distance between two sample points.

        Args:
            x1: first sample point
            x2: second sample point
        Returns:
            Euclidean distance as a float.
        """
        return np.sqrt(np.sum((x1 - x2) ** 2))

    def _get_neighbors(self, test_sample):
        """
        Return the labels of the (up to) k training samples nearest to
        `test_sample`.

        Args:
            test_sample: a single feature vector
        Returns:
            list of the k nearest neighbors' labels, closest first.
        """
        # Vectorized distances to every training sample in one shot
        # (note: assumes features are on comparable scales; no
        # normalization is performed here).
        dists = np.sqrt(np.sum((self.X_train - test_sample) ** 2, axis=1))

        # Stable sort keeps tie-breaking deterministic (by training index).
        # Slicing (rather than indexing 0..k-1) makes k > n_train safe:
        # the original implementation raised IndexError in that case.
        nearest = np.argsort(dists, kind="stable")[: self.k]
        return [self.y_train[i] for i in nearest]

    def predict_single(self, test_sample):
        """
        Predict the class of a single sample by majority vote among its
        k nearest neighbors.

        Args:
            test_sample: a single feature vector
        Returns:
            the predicted class label.
        """
        neighbors = self._get_neighbors(test_sample)
        # most_common(1) breaks ties by first appearance, i.e. by the
        # closest neighbor carrying that label.
        return Counter(neighbors).most_common(1)[0][0]

    def predict(self, X_test):
        """
        Predict classes for multiple samples.

        Args:
            X_test: feature matrix of test samples
        Returns:
            numpy array of predicted labels, one per test sample.
        """
        return np.array([self.predict_single(sample) for sample in X_test])

    def accuracy(self, X_test, y_test):
        """
        Compute classification accuracy on a labeled test set.

        Args:
            X_test: test feature matrix
            y_test: true labels
        Returns:
            fraction of correctly classified samples in [0, 1].
        """
        predictions = self.predict(X_test)
        return np.sum(predictions == y_test) / len(y_test)


def generate_dataset():
    """
    Build a toy 2-D dataset: three Gaussian clusters of 100 points each.

    Returns:
        X: feature array, shape (300, 2)
        y: label array, shape (300,), values 0.0 / 1.0 / 2.0
    """
    np.random.seed(42)  # fixed seed so every run draws identical data

    # (center_x, center_y) for classes 0, 1, 2; all share std-dev 0.8
    centers = [(2, 2), (6, 6), (2, 6)]

    feature_chunks = []
    label_chunks = []
    for label, (cx, cy) in enumerate(centers):
        # Draw x first, then y, so the RNG call sequence matches the
        # fixed seed deterministically.
        xs = np.random.normal(cx, 0.8, 100)
        ys = np.random.normal(cy, 0.8, 100)
        feature_chunks.append(np.column_stack((xs, ys)))
        label_chunks.append(np.full(100, label, dtype=float))

    X = np.vstack(feature_chunks)
    y = np.hstack(label_chunks)
    return X, y


def train_test_split(X, y, test_size=0.2):
    """
    Randomly partition a dataset into train and test subsets.

    Args:
        X: feature data
        y: label data
        test_size: fraction of samples assigned to the test set
    Returns:
        X_train, X_test, y_train, y_test
    """
    # Shuffle sample indices, then carve the first chunk off as the test set.
    shuffled = np.random.permutation(len(X))
    n_test = int(len(X) * test_size)

    test_idx = shuffled[:n_test]
    train_idx = shuffled[n_test:]

    return X[train_idx], X[test_idx], y[train_idx], y[test_idx]


def plot_dataset(X, y, title="数据集可视化"):
    """
    Scatter-plot a 2-feature dataset, one color per class (0, 1, 2).

    Args:
        X: feature data, shape (n, 2)
        y: label data
        title: figure title
    """
    plt.figure(figsize=(10, 8))

    styles = zip(('red', 'blue', 'green'), ('类别 0', '类别 1', '类别 2'))
    for cls, (color, label) in enumerate(styles):
        points = X[y == cls]
        plt.scatter(points[:, 0], points[:, 1], c=color, label=label, alpha=0.7)

    plt.xlabel('特征 1')
    plt.ylabel('特征 2')
    plt.title(title)
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.show()


def evaluate_different_k(X_train, X_test, y_train, y_test):
    """
    Sweep k from 1 to 20, print each accuracy, plot the accuracy curve,
    and return the best-performing k.

    Args:
        X_train, X_test, y_train, y_test: training and test splits
    Returns:
        the k value with the highest test accuracy.
    """
    k_values = range(1, 21)
    accuracies = []

    print("不同k值的准确率:")
    print("-" * 20)

    for k in k_values:
        model = KNN(k=k)
        model.fit(X_train, y_train)
        score = model.accuracy(X_test, y_test)
        accuracies.append(score)
        print(f"k={k:2d}: 准确率 = {score:.4f}")

    # Visualize accuracy as a function of k
    plt.figure(figsize=(10, 6))
    plt.plot(k_values, accuracies, marker='o')
    plt.xlabel('k值')
    plt.ylabel('准确率')
    plt.title('不同k值的KNN算法准确率')
    plt.grid(True, alpha=0.3)
    plt.show()

    # Report the best k (argmax picks the smallest k on ties)
    best_k = k_values[np.argmax(accuracies)]
    best_acc = max(accuracies)
    print(f"\n最佳k值: {best_k}, 最高准确率: {best_acc:.4f}")

    return best_k


def main():
    """
    End-to-end KNN demo: generate data, visualize, split, train,
    predict, and tune k.
    """
    print("=" * 50)
    print("KNN (K近邻) 算法演示")
    print("=" * 50)

    # Step 1: build the synthetic dataset
    print("1. 生成数据集...")
    X, y = generate_dataset()
    print(f"数据集大小: {X.shape[0]} 个样本, {X.shape[1]} 个特征")
    print(f"类别分布: {Counter(y)}")

    # Step 2: show the raw data
    print("\n2. 可视化原始数据...")
    plot_dataset(X, y, "原始数据集 (3类，每类100个样本)")

    # Step 3: hold out 20% for testing
    print("3. 划分训练集和测试集...")
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
    print(f"训练集大小: {X_train.shape[0]} 个样本")
    print(f"测试集大小: {X_test.shape[0]} 个样本")

    # Step 4: fit a baseline model with the default k
    print("\n4. 训练KNN模型 (k=5)...")
    clf = KNN(k=5)
    clf.fit(X_train, y_train)

    # Step 5: evaluate on the held-out set
    print("5. 进行预测...")
    preds = clf.predict(X_test)
    acc = clf.accuracy(X_test, y_test)

    print(f"测试集准确率: {acc:.4f}")

    # Step 6: show a few true-vs-predicted pairs
    print("\n6. 部分预测结果展示:")
    print("真实标签 | 预测标签")
    print("-" * 20)
    for i in range(min(10, len(y_test))):
        print(f"   {int(y_test[i]):2d}    |    {int(preds[i]):2d}")

    # Step 7: sweep k to find the best value
    print("\n7. 评估不同k值的表现...")
    best_k = evaluate_different_k(X_train, X_test, y_train, y_test)

    # Step 8: retrain with the best k and report
    print(f"\n8. 使用最佳k值({best_k})重新训练...")
    tuned = KNN(k=best_k)
    tuned.fit(X_train, y_train)
    tuned_acc = tuned.accuracy(X_test, y_test)
    print(f"最佳模型准确率: {tuned_acc:.4f}")

    print("\n" + "=" * 50)
    print("KNN算法演示完成！")
    print("=" * 50)


# Run the full demo only when executed as a script (not when imported)
if __name__ == "__main__":
    main()
