import numpy as np
from collections import Counter
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False

class KNearestNeighbors:
    """k-nearest-neighbors classifier with Euclidean distance.

    A lazy learner: ``fit`` memorizes the training data, and ``predict``
    labels each query point by majority vote among its k closest
    training points.
    """

    def __init__(self, k=3):
        # Number of neighbors consulted per prediction.
        self.k = k
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Store the training data (no actual fitting happens in KNN)."""
        self.X_train = np.asarray(X, dtype=float)
        self.y_train = np.asarray(y)

    def _distance(self, x1, x2):
        """Euclidean distance between two points (kept for compatibility)."""
        return np.sqrt(np.sum((x1 - x2) ** 2))

    def predict(self, X):
        """Predict a label for each row of X.

        Args:
            X: array-like of shape (n_samples, n_features) — or a single
               sample, which is promoted to 2-D.

        Returns:
            np.ndarray of predicted labels, one per input row.

        Raises:
            ValueError: if called before ``fit``.
        """
        if self.X_train is None:
            raise ValueError("predict() called before fit()")

        X = np.atleast_2d(np.asarray(X, dtype=float))
        # Never request more neighbors than there are training points.
        k = min(self.k, len(self.X_train))

        predictions = []
        for test_point in X:
            # Vectorized distances to every training point in one C-level
            # pass, replacing the original per-point Python list build.
            dists = np.linalg.norm(self.X_train - test_point, axis=1)
            # argpartition selects the k smallest in O(n) — a full sort of
            # all distances (the original approach) is unnecessary.
            nearest = np.argpartition(dists, k - 1)[:k]
            most_common = Counter(self.y_train[nearest]).most_common(1)[0][0]
            predictions.append(most_common)

        return np.array(predictions)

    def score(self, X, y):
        """Return mean accuracy of ``predict(X)`` against labels ``y``."""
        return np.mean(self.predict(X) == np.asarray(y))

if __name__ == "__main__":
    np.random.seed(42)

    # Two Gaussian clusters of 50 points each, centered at (2,2) and (6,6).
    class0 = np.random.normal([2, 2], 1, (50, 2))
    class1 = np.random.normal([6, 6], 1, (50, 2))

    X = np.vstack([class0, class1])
    y = np.hstack([np.zeros(50), np.ones(50)])

    # BUG FIX: the rows are ordered by class (0-49 all class 0, 50-99 all
    # class 1), so the original unshuffled 80/20 split produced a test set
    # containing ONLY class-1 points, making every accuracy figure
    # meaningless. Shuffle before splitting so both classes appear on both
    # sides of the split.
    perm = np.random.permutation(len(X))
    X, y = X[perm], y[perm]

    split_idx = int(0.8 * len(X))
    X_train, X_test = X[:split_idx], X[split_idx:]
    y_train, y_test = y[:split_idx], y[split_idx:]

    print(f"训练集: {len(X_train)}, 测试集: {len(X_test)}")

    # Evaluate test accuracy across several odd k values (odd avoids ties
    # in the two-class majority vote).
    k_values = [1, 3, 5, 7, 9]
    accuracies = []

    for k in k_values:
        knn = KNearestNeighbors(k=k)
        knn.fit(X_train, y_train)
        accuracy = knn.score(X_test, y_test)
        accuracies.append(accuracy)
        print(f"k={k}, 准确率: {accuracy:.4f}")

    plt.figure(figsize=(12, 5))

    # Left panel: accuracy as a function of k.
    plt.subplot(1, 2, 1)
    plt.plot(k_values, accuracies, 'bo-')
    plt.xlabel('k值')
    plt.ylabel('准确率')
    plt.title('不同k值的准确率')
    plt.grid(True, alpha=0.3)
    plt.subplot(1, 2, 2)

    # Right panel: decision boundary on a 50x50 grid covering the data
    # (padded by 1 unit on each side).
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 50),
                         np.linspace(y_min, y_max, 50))

    knn = KNearestNeighbors(k=3)
    knn.fit(X_train, y_train)
    Z = knn.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)

    plt.contourf(xx, yy, Z, alpha=0.3)
    # Training points as circles, test points as triangles.
    plt.scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1],
                c='blue', label='类别0-训练', alpha=0.7)
    plt.scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1],
                c='red', label='类别1-训练', alpha=0.7)
    plt.scatter(X_test[y_test == 0, 0], X_test[y_test == 0, 1],
                c='blue', marker='^', label='类别0-测试')
    plt.scatter(X_test[y_test == 1, 0], X_test[y_test == 1, 1],
                c='red', marker='^', label='类别1-测试')

    plt.xlabel('特征1')
    plt.ylabel('特征2')
    plt.title('KNN分类结果 (k=3)')
    plt.legend()
    plt.tight_layout()
    plt.show()

    # Classify a few hand-picked new samples with the k=3 model.
    new_samples = np.array([[3, 3], [5, 5], [2, 6]])
    predictions = knn.predict(new_samples)
    print("\n新样本预测:")
    for sample, pred in zip(new_samples, predictions):
        print(f"样本{sample} -> 类别{int(pred)}")