import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font so CJK (Chinese) axis labels/titles render
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs displayable when a CJK font is active

class KNN:
    """k-Nearest Neighbors classifier built on NumPy.

    ``fit`` memorizes the training set; ``predict`` labels each query point
    by majority vote among its ``k`` nearest training samples under the
    chosen distance metric ('euclidean' or 'manhattan').
    """

    def __init__(self, k=3):
        # k: number of neighbors consulted per prediction.
        self.k = k
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Store the training data as arrays; return self to allow chaining."""
        self.X_train, self.y_train = np.array(X), np.array(y)
        return self

    def _distance(self, x1, x2, metric='euclidean'):
        """Distance between two single points (kept for API compatibility;
        batch prediction uses the vectorized path in ``_predict_single``)."""
        if metric == 'euclidean':
            return np.sqrt(np.sum((x1 - x2) ** 2))
        elif metric == 'manhattan':
            return np.sum(np.abs(x1 - x2))
        raise ValueError("距离度量支持: 'euclidean' 或 'manhattan'")

    def predict(self, X, metric='euclidean'):
        """Predict a label for every row of ``X``.

        Raises:
            RuntimeError: if called before ``fit``.
            ValueError: for an unknown ``metric``.
        """
        if self.X_train is None:
            # Fail fast with a clear message instead of an opaque TypeError
            # from ``None - x`` below.
            raise RuntimeError("fit() must be called before predict()")
        X = np.array(X)
        return np.array([self._predict_single(x, metric) for x in X])

    def _predict_single(self, x, metric):
        """Majority vote among the k training points closest to ``x``.

        Distances to all training points are computed in one vectorized
        broadcast instead of a Python loop over rows.
        """
        diff = self.X_train - x  # broadcast to shape (n_train, n_features)
        if metric == 'euclidean':
            distances = np.sqrt(np.sum(diff ** 2, axis=1))
        elif metric == 'manhattan':
            distances = np.sum(np.abs(diff), axis=1)
        else:
            raise ValueError("距离度量支持: 'euclidean' 或 'manhattan'")
        k_indices = np.argsort(distances)[:self.k]
        k_labels = self.y_train[k_indices]
        return Counter(k_labels).most_common(1)[0][0]

    def score(self, X, y, metric='euclidean'):
        """Mean accuracy of ``predict(X)`` against the true labels ``y``."""
        return np.mean(self.predict(X, metric) == y)


if __name__ == "__main__":
    # Demo: train KNN on a 2-class synthetic dataset, sweep k, and visualize.
    np.random.seed(42)

    # Generate data: two Gaussian blobs of 50 points each,
    # centered at (0, 0) and (3, 3) with unit variance.
    class0 = np.column_stack((np.random.normal(0, 1, 50), np.random.normal(0, 1, 50)))
    class1 = np.column_stack((np.random.normal(3, 1, 50), np.random.normal(3, 1, 50)))

    X = np.vstack((class0, class1))
    y = np.hstack((np.zeros(50), np.ones(50)))

    # Shuffle, then split 80/20 into train/test
    indices = np.random.permutation(len(X))
    X, y = X[indices], y[indices]
    split = int(0.8 * len(X))
    X_train, X_test, y_train, y_test = X[:split], X[split:], y[:split], y[split:]

    print(f"训练集: {X_train.shape}, 测试集: {X_test.shape}")

    # Evaluate accuracy for several values of k
    k_values = [1, 3, 5, 7, 9]
    accuracies = []

    for k in k_values:
        accuracy = KNN(k=k).fit(X_train, y_train).score(X_test, y_test)
        accuracies.append(accuracy)
        print(f"k={k}, 准确率: {accuracy:.4f}")

    # Visualization: three side-by-side panels
    fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))

    # Panel 1: training-data distribution plus test points
    ax1.scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], c='red', label='类别 0', alpha=0.6)
    ax1.scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], c='blue', label='类别 1', alpha=0.6)
    ax1.scatter(X_test[:, 0], X_test[:, 1], c='green', marker='x', label='测试点', s=100)
    ax1.set_xlabel('特征 1'), ax1.set_ylabel('特征 2'), ax1.set_title('数据分布')
    ax1.legend(), ax1.grid(True, alpha=0.3)

    # Panel 2: accuracy as a function of k
    ax2.plot(k_values, accuracies, 'bo-', linewidth=2, markersize=8)
    ax2.set_xlabel('k值'), ax2.set_ylabel('准确率'), ax2.set_title('k值对准确率的影响')
    ax2.grid(True, alpha=0.3)

    # Panel 3: decision boundary for the best-performing k,
    # computed by classifying every point of a 0.1-spaced grid.
    best_k = k_values[np.argmax(accuracies)]
    knn = KNN(k=best_k).fit(X_train, y_train)

    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1), np.arange(y_min, y_max, 0.1))
    Z = knn.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

    ax3.contourf(xx, yy, Z, alpha=0.3, cmap=plt.cm.RdYlBu)
    ax3.scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], c='red', label='类别 0', alpha=0.6)
    ax3.scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], c='blue', label='类别 1', alpha=0.6)
    ax3.set_xlabel('特征 1'), ax3.set_ylabel('特征 2'), ax3.set_title(f'决策边界 (k={best_k})')
    ax3.legend(), ax3.grid(True, alpha=0.3)

    plt.tight_layout()
    plt.show()

    # Compare the two supported distance metrics at fixed k=3
    print("\n距离度量比较:")
    knn = KNN(k=3).fit(X_train, y_train)
    for metric in ['euclidean', 'manhattan']:
        accuracy = knn.score(X_test, y_test, metric)
        print(f"{metric}距离准确率: {accuracy:.4f}")