import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt

# ======================== Matplotlib CJK rendering setup ========================
plt.rcParams['font.sans-serif'] = ['SimSun']  # use SimSun so Chinese labels render (Windows default font)
plt.rcParams['axes.unicode_minus'] = False  # keep the minus sign from rendering as a placeholder box


# ======================== 1. 加载数据集 ========================
def load_iris_data():
    """Load the Iris dataset.

    :return: (feature matrix, label vector, DataFrame view of both)
    """
    iris = load_iris()
    features, labels = iris.data, iris.target
    # The DataFrame is a convenience view for quick inspection (head()/describe()).
    frame = pd.DataFrame(features, columns=iris.feature_names)
    frame['target'] = labels
    return features, labels, frame


X, y, df = load_iris_data()
print("Iris 数据集形状:", X.shape, y.shape)
print("样本示例:\n", df.head())

# ======================== 2. Split into training and test sets ========================
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42  # hold out 20% for testing; fixed seed for reproducibility
)
print(f"训练集大小: {X_train.shape}, 测试集大小: {X_test.shape}")


# ======================== 3. 手动实现 KNN 算法 ========================
class MyKNN:
    """Minimal brute-force K-Nearest-Neighbors classifier.

    Mirrors the sklearn fit/predict interface so it can be swapped with
    ``KNeighborsClassifier`` in the comparison below.
    """

    def __init__(self, k=3, distance_metric='euclidean'):
        """
        :param k: number of neighbors that vote (k >= 1)
        :param distance_metric: 'euclidean' or 'manhattan'
        """
        self.k = k
        self.distance_metric = distance_metric
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Store the training data (KNN is a lazy learner — no real fitting).

        :return: self, to allow sklearn-style chaining: ``MyKNN().fit(X, y).predict(Z)``
        """
        # np.asarray is a no-op for ndarrays but also accepts plain Python
        # lists, which would otherwise break the vectorized distance math.
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)
        return self

    def predict(self, X):
        """Predict a label for each sample by majority vote of the k nearest
        training samples.

        :param X: iterable of samples with the same feature width as X_train
        :return: 1-D np.ndarray of predicted labels
        :raises ValueError: if called before fit, or for an unknown metric
        """
        if self.X_train is None:
            # Fail fast with a clear message instead of a TypeError on None.
            raise ValueError("必须先调用 fit 再调用 predict")
        predictions = []
        for x in X:
            # Distances from this sample to every training sample
            distances = self._calculate_distances(np.asarray(x))
            # Indices of the k smallest distances
            k_indices = np.argsort(distances)[:self.k]
            # Labels of the k nearest neighbors
            k_nearest_labels = self.y_train[k_indices]
            # Majority vote; bincount+argmax breaks ties toward the smaller label
            prediction = np.bincount(k_nearest_labels).argmax()
            predictions.append(prediction)
        return np.array(predictions)

    def _calculate_distances(self, x):
        """Vectorized distance from one sample to every training sample."""
        if self.distance_metric == 'euclidean':
            # Euclidean (L2) distance
            return np.sqrt(np.sum((self.X_train - x) ** 2, axis=1))
        elif self.distance_metric == 'manhattan':
            # Manhattan (L1) distance
            return np.sum(np.abs(self.X_train - x), axis=1)
        else:
            raise ValueError("不支持的距离度量: {}".format(self.distance_metric))


# ======================== 4. 选择最优 K 值（手动实现版） ========================
def find_best_k(X_train, y_train, X_test, y_test, max_k=20, distance_metric='euclidean'):
    """
    Try every K from 1 to max_k and pick the one with the highest
    test-set accuracy (ties go to the smaller K).

    :param max_k: largest K value to try
    :return: (best K, its test-set accuracy)
    """
    accuracies = []
    for k in range(1, max_k + 1):
        model = MyKNN(k=k, distance_metric=distance_metric)
        model.fit(X_train, y_train)
        accuracies.append(accuracy_score(y_test, model.predict(X_test)))

    # Scan for the running maximum; strict '>' keeps the first (smallest) K on ties.
    best_k, best_accuracy = 1, 0.0
    for idx, acc in enumerate(accuracies):
        if acc > best_accuracy:
            best_k, best_accuracy = idx + 1, acc

    # Visualize accuracy as a function of K (labels/titles render in Chinese).
    plt.plot(range(1, max_k + 1), accuracies, marker='o')
    plt.xlabel('K 值')
    plt.ylabel('测试集准确率')
    plt.title(f'K 值与准确率关系（{distance_metric} 距离）')
    plt.grid(True)
    plt.show()
    return best_k, best_accuracy


# Search for the best K value (Euclidean distance)
best_k_euclidean, accuracy_euclidean = find_best_k(
    X_train, y_train, X_test, y_test, max_k=20, distance_metric='euclidean'
)
print(f"最优 K 值（欧式距离）: {best_k_euclidean}, 测试集准确率: {accuracy_euclidean:.2%}")

# Repeat the search with Manhattan distance for comparison
best_k_manhattan, accuracy_manhattan = find_best_k(
    X_train, y_train, X_test, y_test, max_k=20, distance_metric='manhattan'
)
print(f"最优 K 值（曼哈顿距离）: {best_k_manhattan}, 测试集准确率: {accuracy_manhattan:.2%}")


# ======================== 5. 与 sklearn 的 KNN 对比 ========================
def compare_with_sklearn(X_train, y_train, X_test, y_test, best_k, distance_metric='euclidean'):
    """Run the hand-rolled KNN and sklearn's side by side and print both
    accuracies plus whether the predictions agree element-wise.

    :raises ValueError: for a distance metric neither implementation supports
    """
    # Hand-rolled implementation first: for an unknown metric, MyKNN.predict
    # raises its own ValueError before the check below is ever reached.
    mine = MyKNN(k=best_k, distance_metric=distance_metric)
    mine.fit(X_train, y_train)
    my_pred = mine.predict(X_test)
    my_accuracy = accuracy_score(y_test, my_pred)

    # sklearn reference with the same K and metric
    if distance_metric not in ('euclidean', 'manhattan'):
        raise ValueError("不支持的距离度量")
    reference = KNeighborsClassifier(n_neighbors=best_k, metric=distance_metric)
    reference.fit(X_train, y_train)
    sk_pred = reference.predict(X_test)
    sk_accuracy = accuracy_score(y_test, sk_pred)

    print(f"手动实现 KNN（K={best_k}, {distance_metric}）准确率: {my_accuracy:.2%}")
    print(f"sklearn KNN（K={best_k}, {distance_metric}）准确率: {sk_accuracy:.2%}")
    print("预测结果是否一致:", np.array_equal(my_pred, sk_pred))


# Compare hand-rolled vs sklearn KNN, Euclidean distance
compare_with_sklearn(X_train, y_train, X_test, y_test, best_k_euclidean, distance_metric='euclidean')
# Compare hand-rolled vs sklearn KNN, Manhattan distance
compare_with_sklearn(X_train, y_train, X_test, y_test, best_k_manhattan, distance_metric='manhattan')