import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from scipy.spatial import distance

# Generate synthetic data for two classes
def generate_data():
    """Create a fixed synthetic two-class 2-D dataset.

    Returns:
        (X, y): X is a (100, 2) float array of features; y is a (100,)
        array of labels where the first 50 rows are class 0 and the
        last 50 are class 1.
    """
    np.random.seed(42)  # fixed seed so the dataset is reproducible
    centers = (np.array([-2, -2]), np.array([2, 2]))
    # One 50-point Gaussian blob around each class center (same draw
    # order as before, so the values are identical).
    blobs = [np.random.randn(50, 2) + center for center in centers]
    features = np.concatenate(blobs, axis=0)
    labels = np.concatenate((np.zeros(50), np.ones(50)))  # 0 = class1, 1 = class2
    return features, labels

# KNN classification function
def knn_classify(X, y, new_point, k):
    """Classify `new_point` by majority vote among its k nearest neighbors.

    Args:
        X: (n_samples, 2) array of training features.
        y: (n_samples,) array of binary labels (0 or 1).
        new_point: 1-D array-like with 2 coordinates.
        k: number of neighbors to consult.

    Returns:
        int: 1 if more than half of the k nearest labels are 1, else 0
        (ties on even k therefore resolve to class 0, as before).
    """
    # Vectorized Euclidean distances — one C-level pass instead of a
    # Python loop calling scipy per pair.
    distances = np.linalg.norm(X - np.asarray(new_point), axis=1)
    k_indices = np.argsort(distances)[:k]  # indices of the k closest points
    k_nearest_labels = y[k_indices]
    # Majority vote: with labels in {0, 1}, the sum counts the 1-votes.
    return 1 if np.sum(k_nearest_labels) > (k / 2) else 0

# Visualization of KNN decision boundary
def visualize_knn_decision_boundary(X, y, k):
    """Plot the KNN decision boundary for 2-D data, then classify
    user-entered points interactively on a second figure.

    Args:
        X: (n_samples, 2) array of training features.
        y: (n_samples,) array of binary labels (0 or 1).
        k: number of neighbors used by the classifier.

    Side effects: opens two matplotlib windows and reads points from
    stdin until a non-numeric value is entered.
    """
    # Grid covering the data extent with a 1-unit margin.
    x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
                         np.arange(y_min, y_max, 0.1))
    grid_points = np.c_[xx.ravel(), yy.ravel()]

    # Predicted class for each grid point defines the decision regions.
    Z = np.array([knn_classify(X, y, point, k) for point in grid_points])
    Z = Z.reshape(xx.shape)

    # Class 0 region is light red, class 1 light blue.
    cmap_light = ListedColormap(['#FFAAAA', '#AAAAFF'])
    # BUGFIX: was ['blue', 'red'] — the reverse of cmap_light, so each
    # class's points were drawn on the other class's background region.
    cmap_bold = ['red', 'blue']

    def _draw_base(ax):
        # Decision regions + training scatter, shared by both figures
        # (was duplicated verbatim in the original).
        ax.contourf(xx, yy, Z, cmap=cmap_light, alpha=0.5)
        ax.scatter(X[y == 0][:, 0], X[y == 0][:, 1], color=cmap_bold[0],
                   label='类别 0', edgecolor='k')
        ax.scatter(X[y == 1][:, 0], X[y == 1][:, 1], color=cmap_bold[1],
                   label='类别 1', edgecolor='k')
        ax.set_xlabel("特征 1")
        ax.set_ylabel("特征 2")
        ax.set_title(f"K-最近邻分类决策边界 (K={k})")
        ax.legend()

    # Static overview figure (blocks until the window is closed).
    _, ax = plt.subplots(figsize=(10, 8))
    _draw_base(ax)
    plt.show()

    # Interactive figure: classify points typed by the user.
    fig, ax = plt.subplots(figsize=(10, 8))
    _draw_base(ax)
    # BUGFIX: the original never showed this figure, so plt.draw() had
    # no visible effect; show it non-blocking and pump events below.
    plt.show(block=False)

    while True:
        try:
            # Prompt for a new test point.
            x_new = float(input("请输入特征 1 值: "))
            y_new = float(input("请输入特征 2 值: "))
            new_point = np.array([x_new, y_new])

            # Classify the new point and mark it in its predicted color.
            prediction = knn_classify(X, y, new_point, k)
            color = cmap_bold[int(prediction)]
            ax.scatter(new_point[0], new_point[1], color=color,
                       edgecolor='black', s=100, marker='o')

            # Dashed connectors to the k nearest training points.
            distances = [distance.euclidean(new_point, x) for x in X]
            k_indices = np.argsort(distances)[:k]
            for i in k_indices:
                ax.plot([new_point[0], X[i, 0]], [new_point[1], X[i, 1]],
                        color='gray', linestyle='--')
            plt.pause(0.01)  # flush the new artists to the open window

            print(f"预测类别: {prediction} (K={k} 最近邻)")

        except ValueError:
            # Any non-numeric entry ends the interactive session.
            print("输入无效，请输入有效的数字。")
            break

if __name__ == "__main__":
    # Build the synthetic two-class dataset.
    features, labels = generate_data()

    # Neighborhood size for the classifier — tweak as desired.
    num_neighbors = 5

    # Draw the decision boundary and start the interactive classifier.
    visualize_knn_decision_boundary(features, labels, k=num_neighbors)