import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
import pandas as pd
import seaborn as sns

# Configure fonts that can render CJK glyphs so the Chinese plot labels display correctly
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering correctly under CJK fonts

# Load the iris dataset (150 samples, 4 numeric features, 3 classes)
iris = load_iris()
X, y = iris.data, iris.target

# Hold out 30% of the samples for testing; fixed random_state for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

class KNNClassifier:
    """A from-scratch K-Nearest-Neighbors classifier (lazy learner).

    ``fit`` only stores the training data; each query in ``predict`` is
    classified by majority vote among its k nearest training samples.
    Labels must be non-negative integers (the vote uses ``np.bincount``).
    """

    def __init__(self, k=5, distance_metric='euclidean', p=3):
        """
        Initialize the KNN classifier.

        :param k: number of neighbors that vote, default 5
        :param distance_metric: one of 'euclidean', 'manhattan', 'minkowski'
        :param p: order of the Minkowski distance, used only when
                  distance_metric == 'minkowski'; default 3 preserves the
                  previously hard-coded behavior
        """
        self.k = k
        self.distance_metric = distance_metric
        self.p = p

    def fit(self, X, y):
        """
        Store the training data (KNN builds no model up front).

        :param X: training features, array-like of shape (n_samples, n_features)
        :param y: training labels, array-like of non-negative integers
        """
        # np.asarray so the vectorized distance math also accepts plain lists
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)

    def predict(self, X):
        """
        Predict the class of each row of X.

        :param X: query features, array-like of shape (n_queries, n_features)
        :return: np.ndarray of predicted labels
        :raises ValueError: if distance_metric is not supported
        """
        return np.array([self._predict(x) for x in np.asarray(X)])

    def _predict(self, x):
        """
        Predict the class of a single sample by majority vote among the
        k nearest training points.

        :param x: single sample, shape (n_features,)
        :return: predicted label (int)
        """
        # Broadcast against the whole training set: (n_train, n_features).
        # One vectorized pass replaces the former per-row Python loop.
        diff = self.X_train - x
        if self.distance_metric == 'euclidean':
            distances = np.sqrt(np.sum(diff ** 2, axis=1))
        elif self.distance_metric == 'manhattan':
            distances = np.sum(np.abs(diff), axis=1)
        elif self.distance_metric == 'minkowski':
            distances = np.power(np.sum(np.abs(diff) ** self.p, axis=1), 1 / self.p)
        else:
            raise ValueError("不支持的距离度量方法")

        # Indices of the k smallest distances; argsort breaks ties by index,
        # matching the original list-based implementation.
        k_indices = np.argsort(distances)[:self.k]
        k_nearest_labels = self.y_train[k_indices]
        # Majority vote; bincount().argmax() breaks ties toward the smaller label.
        return np.bincount(k_nearest_labels).argmax()

    def _euclidean_distance(self, x1, x2):
        """Euclidean (L2) distance between two vectors."""
        return np.sqrt(np.sum((x1 - x2) ** 2))

    def _manhattan_distance(self, x1, x2):
        """Manhattan (L1) distance between two vectors."""
        return np.sum(np.abs(x1 - x2))

    def _minkowski_distance(self, x1, x2, p=3):
        """Minkowski distance of order p (default p=3)."""
        return np.power(np.sum(np.power(np.abs(x1 - x2), p)), 1 / p)

def find_best_k(X_train, y_train, X_test, y_test, max_k=20, distance_metric='euclidean', show_plot=True):
    """
    Search k = 1..max_k and return the k with the highest test accuracy.

    :param X_train: training features
    :param y_train: training labels
    :param X_test: test features
    :param y_test: test labels
    :param max_k: largest k to try (inclusive)
    :param distance_metric: distance metric passed to KNNClassifier
    :param show_plot: whether to display the accuracy-vs-k plot
    :return: (best_k, best_accuracy); ties resolve to the smallest k
    """
    k_values = list(range(1, max_k + 1))
    scores = []

    # Evaluate every candidate k on the held-out test set.
    for k in k_values:
        model = KNNClassifier(k=k, distance_metric=distance_metric)
        model.fit(X_train, y_train)
        scores.append(accuracy_score(y_test, model.predict(X_test)))

    # First index of the maximum score == smallest best k (same tie-breaking
    # as a running "strictly greater" comparison).
    best_idx = max(range(len(scores)), key=scores.__getitem__)
    best_k, best_accuracy = k_values[best_idx], scores[best_idx]

    # Plot accuracy against k and save the figure to disk.
    plt.figure(figsize=(10, 6))
    plt.plot(k_values, scores, marker='o')
    plt.title(f'不同K值对模型准确率的影响 ({distance_metric}距离)')
    plt.xlabel('K值')
    plt.ylabel('准确率')
    plt.xticks(k_values)
    plt.grid(True)
    plt.tight_layout()
    plt.savefig(f'k_value_accuracy_{distance_metric}.png')

    if show_plot:
        plt.show()
    else:
        plt.close()

    return best_k, best_accuracy

# Experiment 1: find the best K using Euclidean distance (the default)
best_k_euclidean, accuracy_euclidean = find_best_k(X_train, y_train, X_test, y_test)


# Experiment 2: find the best K using Manhattan distance
best_k_manhattan, accuracy_manhattan = find_best_k(X_train, y_train, X_test, y_test, distance_metric='manhattan')


# Experiment 3: find the best K using Minkowski distance
best_k_minkowski, accuracy_minkowski = find_best_k(X_train, y_train, X_test, y_test, distance_metric='minkowski')


# Experiment 4: sklearn's reference KNN, using the best K found with Euclidean distance
sklearn_knn = KNeighborsClassifier(n_neighbors=best_k_euclidean)
sklearn_knn.fit(X_train, y_train)
y_pred_sklearn = sklearn_knn.predict(X_test)
accuracy_sklearn = accuracy_score(y_test, y_pred_sklearn)


def plot_confusion_matrix(y_true, y_pred, title, filename, show_plot=True):
    """
    Draw a confusion-matrix heatmap and save it to disk.

    Axis tick labels come from the module-level ``iris.target_names``.

    :param y_true: ground-truth labels
    :param y_pred: predicted labels
    :param title: figure title
    :param filename: path the figure is saved to
    :param show_plot: whether to display the figure after saving
    """
    names = iris.target_names
    matrix = confusion_matrix(y_true, y_pred)

    plt.figure(figsize=(8, 6))
    sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues',
                xticklabels=names, yticklabels=names)
    plt.title(title)
    plt.xlabel('预测类别')
    plt.ylabel('真实类别')
    plt.tight_layout()
    plt.savefig(filename)

    plt.show() if show_plot else plt.close()

# Refit the custom KNN at the best Euclidean K to get predictions for the confusion matrix
knn_euclidean = KNNClassifier(k=best_k_euclidean)
knn_euclidean.fit(X_train, y_train)
y_pred_euclidean = knn_euclidean.predict(X_test)

# Confusion matrices for the custom model and the sklearn baseline
plot_confusion_matrix(y_test, y_pred_euclidean, f'自定义KNN (k={best_k_euclidean}, 欧氏距离)', 'confusion_matrix_euclidean.png')
plot_confusion_matrix(y_test, y_pred_sklearn, f'sklearn KNN (k={best_k_euclidean})', 'confusion_matrix_sklearn.png')

# Collect all experiment results (metric, best K, accuracy) into one table
results = pd.DataFrame({
    '距离度量': ['欧氏距离', '曼哈顿距离', '闵可夫斯基距离', 'sklearn (欧氏距离)'],
    '最优K值': [best_k_euclidean, best_k_manhattan, best_k_minkowski, best_k_euclidean],
    '准确率': [accuracy_euclidean, accuracy_manhattan, accuracy_minkowski, accuracy_sklearn]
})
# Print a summary of each experiment, then persist the table as CSV
print(f"欧氏距离下最优K值: {best_k_euclidean}, 准确率: {accuracy_euclidean:.4f}")
print(f"曼哈顿距离下最优K值: {best_k_manhattan}, 准确率: {accuracy_manhattan:.4f}")
print(f"闵可夫斯基距离下最优K值: {best_k_minkowski}, 准确率: {accuracy_minkowski:.4f}")
print(f"sklearn KNN (k={best_k_euclidean}) 准确率: {accuracy_sklearn:.4f}")
results.to_csv('knn_experiment_results.csv', index=False)
