import numpy as np
from collections import Counter
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False



class KNN:
    """Brute-force k-nearest-neighbors classifier.

    ``fit`` only stores the training data (kNN is a lazy learner); each
    prediction scans every stored sample, so a single query costs
    O(n_train * n_features).
    """

    def __init__(self, k=3, distance_metric='euclidean', p=3):
        """
        Parameters
        ----------
        k : int
            Number of neighbors consulted per prediction. Must be >= 1.
        distance_metric : str
            One of 'euclidean', 'manhattan', 'minkowski'.
        p : int or float
            Order of the Minkowski distance; only used when
            ``distance_metric == 'minkowski'``. Defaults to 3 to preserve
            the previously hard-coded behavior.

        Raises
        ------
        ValueError
            If ``k`` is not a positive integer.
        """
        if k < 1:
            # Fail fast: k=0 would otherwise surface as an obscure
            # IndexError inside predict().
            raise ValueError("k must be a positive integer")
        self.k = k
        self.distance_metric = distance_metric
        self.p = p
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Store the training data and return ``self``.

        Inputs are converted with ``np.asarray`` so plain Python lists
        are accepted in addition to ndarrays.
        """
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)
        return self

    def _calculate_distance(self, x1, x2):
        """Distance between two feature vectors under the configured metric.

        Raises
        ------
        ValueError
            If ``self.distance_metric`` is not a supported name.
        """
        diff = np.abs(x1 - x2)
        if self.distance_metric == 'euclidean':
            return np.sqrt(np.sum(diff ** 2))
        elif self.distance_metric == 'manhattan':
            return np.sum(diff)
        elif self.distance_metric == 'minkowski':
            return np.sum(diff ** self.p) ** (1 / self.p)
        else:
            raise ValueError("不支持的距離度量方法")

    def _k_nearest_labels(self, x):
        """Return the labels of the k training samples closest to ``x``.

        Shared by ``_predict_single`` and ``predict_proba`` so the
        neighbor search is implemented exactly once.
        """
        distances = [
            (self._calculate_distance(x, x_train), self.y_train[i])
            for i, x_train in enumerate(self.X_train)
        ]
        # Sort on the distance only; sorting the raw tuples would compare
        # labels whenever two distances tie.
        distances.sort(key=lambda pair: pair[0])
        return [label for _, label in distances[:self.k]]

    def predict(self, X):
        """Predict a class label for each row of ``X``; returns an ndarray."""
        return np.array([self._predict_single(x) for x in X])

    def _predict_single(self, x):
        """Majority vote among the k nearest neighbors of a single sample."""
        votes = Counter(self._k_nearest_labels(x))
        return votes.most_common(1)[0][0]

    def predict_proba(self, X):
        """Estimate class probabilities for each row of ``X``.

        Returns
        -------
        list of dict
            One dict per sample mapping label -> fraction of the k
            neighbors carrying that label (absent labels are omitted).
        """
        probas = []
        for x in X:
            counts = Counter(self._k_nearest_labels(x))
            probas.append({label: count / self.k for label, count in counts.items()})
        return probas


# Example: exercise the kNN implementation on the iris data set
def example_usage():
    """Train a KNN classifier on iris, print metrics, and return
    (model, X_test, y_test, y_pred)."""
    iris = load_iris()
    features, labels = iris.data, iris.target

    # Stratified 70/30 split with a fixed seed for reproducibility.
    X_train, X_test, y_train, y_test = train_test_split(
        features, labels, test_size=0.3, random_state=42, stratify=labels
    )

    # Standardize features; the scaler is fit on training data only to
    # avoid leaking test-set statistics.
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    # fit() returns self, so construction and training chain together.
    knn = KNN(k=5, distance_metric='euclidean').fit(X_train, y_train)

    y_pred = knn.predict(X_test)

    # Report overall accuracy plus the per-class breakdown.
    accuracy = accuracy_score(y_test, y_pred)
    print(f"准确率: {accuracy:.4f}")
    print("\n分类报告:")
    print(classification_report(y_test, y_pred, target_names=iris.target_names))

    return knn, X_test, y_test, y_pred


# Visualize the results
def plot_results(X_test, y_test, y_pred, feature_names, target_names):
    """Draw side-by-side scatter plots of true vs. predicted labels,
    projected onto the first two features."""
    plt.figure(figsize=(12, 5))

    # Both panels are identical except for the title and the coloring
    # labels, so render them in a loop.
    panels = (('真实标签', y_test), ('预测标签', y_pred))
    for position, (title, labels) in enumerate(panels, start=1):
        plt.subplot(1, 2, position)
        sc = plt.scatter(
            X_test[:, 0], X_test[:, 1], c=labels, cmap='viridis', alpha=0.7
        )
        plt.xlabel(feature_names[0])
        plt.ylabel(feature_names[1])
        plt.title(title)
        plt.colorbar(sc, ticks=range(len(target_names)))

    plt.tight_layout()
    plt.show()


# Test the effect of different k values
def test_different_k():
    """Sweep k from 1 to 15, print the accuracy for each value, plot the
    accuracy curve, and report the best k found."""
    iris = load_iris()
    X_all, y_all = iris.data, iris.target

    # Same stratified split and scaling as example_usage().
    X_train, X_test, y_train, y_test = train_test_split(
        X_all, y_all, test_size=0.3, random_state=42, stratify=y_all
    )
    scaler = StandardScaler()
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)

    k_values = range(1, 16)
    accuracies = []
    for k in k_values:
        model = KNN(k=k).fit(X_train, y_train)
        accuracy = accuracy_score(y_test, model.predict(X_test))
        accuracies.append(accuracy)
        print(f"k={k}: 准确率 = {accuracy:.4f}")

    # Plot accuracy as a function of k.
    plt.figure(figsize=(10, 6))
    plt.plot(k_values, accuracies, 'bo-', linewidth=2, markersize=8)
    plt.xlabel('k值')
    plt.ylabel('准确率')
    plt.title('k值对kNN算法性能的影响')
    plt.grid(True, alpha=0.3)
    plt.show()

    # Report the k with the highest accuracy (first one on ties).
    best_k = k_values[int(np.argmax(accuracies))]
    best_accuracy = max(accuracies)
    print(f"\n最佳k值: {best_k}, 最高准确率: {best_accuracy:.4f}")


if __name__ == "__main__":
    print("=== kNN算法实现示例 ===\n")

    # Basic usage: train on iris, predict, and print metrics.
    print("1. 基本使用示例:")
    knn_model, X_test, y_test, y_pred = example_usage()

    # Visualize true vs. predicted labels on the test split.
    iris = load_iris()
    plot_results(X_test, y_test, y_pred, iris.feature_names, iris.target_names)

    # Sweep k values and show how accuracy responds.
    print("\n2. 测试不同k值的影响:")
    test_different_k()

    # Show per-class probability estimates for a few test samples.
    print("\n3. 预测概率示例:")
    probas = knn_model.predict_proba(X_test[:3])
    for i, proba in enumerate(probas):
        print(f"样本 {i + 1} 的预测概率: {proba}")