import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
import matplotlib.pyplot as plt
import seaborn as sns

# Configure matplotlib fonts so the Chinese titles/labels render correctly
plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei", "Heiti TC"]
plt.rcParams['axes.unicode_minus'] = False  # keep minus signs rendering with CJK fonts

# Load the Iris dataset
iris = load_iris()
X = iris.data
y = iris.target
feature_names = iris.feature_names
target_names = iris.target_names

# Split BEFORE fitting the scaler: the original code called fit_transform on
# the full dataset and then split, which leaks test-set statistics (mean/std)
# into the preprocessing step.
X_train_raw, X_test_raw, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)

# Standardize features (zero mean / unit variance) using training data only
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train_raw)
X_test = scaler.transform(X_test_raw)

# Full standardized matrix, kept for the decision-boundary visualizations below
X_scaled = scaler.transform(X)


# Custom KNN implementation (duck-typed to the sklearn estimator API so it
# can be passed to utilities such as cross_val_score)
class MyKNN:
    """K-nearest-neighbors classifier with a choice of distance metrics.

    Supported metrics: 'euclidean', 'manhattan', 'minkowski' (fixed p=3)
    and 'chebyshev'. Class labels must be non-negative integers (required
    by the bincount-based majority vote).
    """

    def __init__(self, n_neighbors=5, metric='euclidean'):
        self.n_neighbors = n_neighbors
        self.metric = metric
        self.X_train = None
        self.y_train = None

    def fit(self, X, y):
        """Memorize the training data. Returns self to support chaining."""
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)
        return self

    def predict(self, X):
        """Predict a label for each row of X by majority vote among the
        n_neighbors nearest training samples.

        Raises ValueError for an unsupported metric.
        """
        # Resolve the metric once, before the per-sample loop, so an
        # unsupported metric fails fast (the original re-checked it for
        # every sample and never raised at all for empty input).
        dispatch = {
            'euclidean': self._euclidean_distance,
            'manhattan': self._manhattan_distance,
            'minkowski': lambda x, X2: self._minkowski_distance(x, X2, p=3),
            'chebyshev': self._chebyshev_distance,
        }
        if self.metric not in dispatch:
            raise ValueError(f"不支持的距离度量: {self.metric}")
        distance_fn = dispatch[self.metric]

        X_train = np.asarray(self.X_train)
        y_train = np.asarray(self.y_train)
        n_train = len(X_train)
        # Gracefully handle n_neighbors larger than the training set
        k = min(self.n_neighbors, n_train)

        predictions = []
        for x in np.asarray(X):
            distances = distance_fn(x, X_train)
            if k < n_train:
                # argpartition is O(n) vs. argsort's O(n log n); the vote
                # count below does not depend on the order of the k indices
                k_indices = np.argpartition(distances, k - 1)[:k]
            else:
                k_indices = np.arange(n_train)
            # Majority vote: bincount + argmax (ties go to the lower label)
            predictions.append(np.bincount(y_train[k_indices]).argmax())

        return np.array(predictions)

    def score(self, X, y):
        """Return the accuracy of the model on (X, y)."""
        y_pred = self.predict(X)
        return accuracy_score(y, y_pred)

    def get_params(self, deep=True):
        """Return hyper-parameters (sklearn clone / cross_val_score compatibility)."""
        return {
            'n_neighbors': self.n_neighbors,
            'metric': self.metric
        }

    def set_params(self, **params):
        """Set hyper-parameters by name; returns self."""
        for param, value in params.items():
            setattr(self, param, value)
        return self

    def _euclidean_distance(self, x1, X2):
        # sqrt(sum((a-b)^2)) of x1 against every row of X2
        return np.sqrt(np.sum((X2 - x1) ** 2, axis=1))

    def _manhattan_distance(self, x1, X2):
        # sum(|a-b|) row-wise
        return np.sum(np.abs(X2 - x1), axis=1)

    def _minkowski_distance(self, x1, X2, p=3):
        # (sum(|a-b|^p))^(1/p) row-wise
        return np.sum(np.abs(X2 - x1) ** p, axis=1) ** (1 / p)

    def _chebyshev_distance(self, x1, X2):
        # max(|a-b|) row-wise
        return np.max(np.abs(X2 - x1), axis=1)


# Search for the K value with the best cross-validated accuracy
def find_best_k(X_train, y_train, max_k=30):
    """Evaluate K = 1..max_k with 5-fold cross-validation, plot the
    accuracy curve with the best K highlighted, and return
    (best_k, accuracies)."""
    k_values = range(1, max_k + 1)
    accuracies = [
        np.mean(cross_val_score(MyKNN(n_neighbors=k), X_train, y_train, cv=5))
        for k in k_values
    ]

    best_idx = int(np.argmax(accuracies))
    best_k = k_values[best_idx]
    best_acc = np.max(accuracies)

    # Accuracy-vs-K curve
    plt.figure(figsize=(12, 8))
    plt.plot(k_values, accuracies, marker='o', linestyle='-', linewidth=2, markersize=8)
    plt.title('Iris数据集上K近邻算法的交叉验证准确率随K值变化', fontsize=16, pad=20)
    plt.xlabel('K值 (近邻数量)', fontsize=14)
    plt.ylabel('5折交叉验证平均准确率', fontsize=14)
    plt.grid(True, linestyle='--', alpha=0.7)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)

    # Highlight the winning K with a vertical line and an annotation
    plt.axvline(x=best_k, color='r', linestyle='--', linewidth=2)
    plt.annotate(f'最佳K值: {best_k}\n准确率: {best_acc:.4f}',
                 xy=(best_k, best_acc),
                 xytext=(best_k + 2, best_acc - 0.03),
                 fontsize=12,
                 arrowprops=dict(facecolor='black', shrink=0.05, width=2, headwidth=8))

    plt.tight_layout()
    plt.show()

    return best_k, accuracies


# Compare classification accuracy across distance metrics at the chosen K
def evaluate_distance_metrics(X_train, X_test, y_train, y_test, best_k):
    """Train MyKNN with each supported distance metric at n_neighbors=best_k,
    print and bar-plot the test-set accuracies, and return a dict mapping
    metric name -> accuracy."""
    metrics = ['euclidean', 'manhattan', 'minkowski', 'chebyshev']
    results = {}

    for metric in metrics:
        knn = MyKNN(n_neighbors=best_k, metric=metric)
        knn.fit(X_train, y_train)
        y_pred = knn.predict(X_test)
        accuracy = accuracy_score(y_test, y_pred)
        results[metric] = accuracy
        print(f"距离度量: {metric}, 准确率: {accuracy:.4f}")

    # Bar chart of per-metric accuracy
    plt.figure(figsize=(12, 8))
    bars = plt.bar(results.keys(), results.values(), color=['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728'])

    plt.title(f'Iris数据集上K={best_k}时不同距离度量的分类准确率对比', fontsize=16, pad=20)
    plt.xlabel('距离度量方法', fontsize=14)
    plt.ylabel('测试集准确率', fontsize=14)
    # Fix: derive the y-axis floor from the data instead of the original
    # hard-coded 0.8, which hid any bar whose accuracy fell below 0.8.
    y_floor = max(0.0, min(results.values()) - 0.05)
    plt.ylim(y_floor, 1.0)
    plt.grid(axis='y', linestyle='--', alpha=0.7)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)

    # Numeric labels just above each bar
    for bar in bars:
        height = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2., height + 0.005,
                 f'{height:.4f}', ha='center', va='bottom', fontsize=12)

    plt.tight_layout()
    plt.show()

    return results


# Main driver: tune K, compare distance metrics, train/evaluate the final
# model, and benchmark against sklearn's reference implementation.
print("1. 寻找最优K值...")
best_k, accuracies = find_best_k(X_train, y_train)
print(f"最优K值: {best_k}, 交叉验证准确率: {np.max(accuracies):.4f}")

# Pick the distance metric with the highest test-set accuracy at best_k
print("\n2. 评估不同距离度量的性能...")
distance_results = evaluate_distance_metrics(X_train, X_test, y_train, y_test, best_k)
best_metric = max(distance_results, key=distance_results.get)
print(f"最佳距离度量: {best_metric}, 准确率: {distance_results[best_metric]:.4f}")

# Train the custom KNN with the tuned (K, metric) pair and report metrics
print("\n3. 使用最佳参数训练最终模型...")
final_knn = MyKNN(n_neighbors=best_k, metric=best_metric)
final_knn.fit(X_train, y_train)
y_pred = final_knn.predict(X_test)
final_accuracy = accuracy_score(y_test, y_pred)
print(f"最终模型在测试集上的准确率: {final_accuracy:.4f}")
print("\n分类报告:")
print(classification_report(y_test, y_pred, target_names=target_names))

# Sanity check: sklearn's KNeighborsClassifier with the same hyper-parameters
print("\n4. 与sklearn的KNN实现对比...")
sklearn_knn = KNeighborsClassifier(n_neighbors=best_k, metric=best_metric)
sklearn_knn.fit(X_train, y_train)
sklearn_pred = sklearn_knn.predict(X_test)
sklearn_accuracy = accuracy_score(y_test, sklearn_pred)
print(f"sklearn KNN在测试集上的准确率: {sklearn_accuracy:.4f}")
print("\n分类报告:")
print(classification_report(y_test, sklearn_pred, target_names=target_names))


# Render a labelled confusion-matrix heatmap for one set of predictions
def plot_confusion_matrix(y_true, y_pred, title):
    """Draw a seaborn heatmap of confusion_matrix(y_true, y_pred),
    with axes labelled by the module-level target_names."""
    matrix = confusion_matrix(y_true, y_pred)

    plt.figure(figsize=(10, 8))
    sns.heatmap(
        matrix,
        annot=True,
        fmt='d',
        cmap='Blues',
        xticklabels=target_names,
        yticklabels=target_names,
        annot_kws={"size": 14},
    )

    plt.title(title, fontsize=16, pad=20)
    plt.xlabel('预测类别', fontsize=14)
    plt.ylabel('真实类别', fontsize=14)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.tight_layout()
    plt.show()


# Visualize class regions in the plane of the first two features
def plot_decision_boundary(X, y, model, title):
    """Plot the model's decision regions over features 0 and 1.

    The model is queried on a dense grid; since it was fitted on all four
    features, features 2 and 3 are held fixed at their column means while
    the first two sweep the grid.
    """
    X_plot = X[:, :2]

    # Dense grid over the first two features, with a 1-unit margin
    step = 0.02
    x_min, x_max = X_plot[:, 0].min() - 1, X_plot[:, 0].max() + 1
    y_min, y_max = X_plot[:, 1].min() - 1, X_plot[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
                         np.arange(y_min, y_max, step))

    # Pin features 2 and 3 to their means for every grid point
    col_means = np.mean(X, axis=0)
    grid = np.column_stack([
        xx.ravel(),
        yy.ravel(),
        np.full(xx.size, col_means[2]),
        np.full(xx.size, col_means[3]),
    ])
    Z = model.predict(grid).reshape(xx.shape)

    # Filled contours for the predicted regions, scatter for the true data
    plt.figure(figsize=(12, 10))
    plt.contourf(xx, yy, Z, alpha=0.3, cmap='viridis')
    scatter = plt.scatter(X_plot[:, 0], X_plot[:, 1], c=y, cmap='viridis',
                          edgecolors='k', s=80, alpha=0.8)

    plt.title(title, fontsize=16, pad=20)
    plt.xlabel(feature_names[0], fontsize=14)
    plt.ylabel(feature_names[1], fontsize=14)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)

    # First legend: numeric class codes, upper right
    legend1 = plt.legend(*scatter.legend_elements(),
                         loc="upper right", title="类别")
    plt.gca().add_artist(legend1)

    # Second legend: the same handles mapped to species names, lower right
    handles, labels = scatter.legend_elements()
    named_labels = [target_names[i] for i in range(len(labels))]
    legend2 = plt.legend(handles, named_labels, loc="lower right", title="真实类别")
    plt.gca().add_artist(legend2)

    plt.tight_layout()
    plt.show()


# Plot confusion matrices for both the custom and sklearn models
print("\n5. 可视化混淆矩阵...")
plot_confusion_matrix(y_test, y_pred, f'自定义KNN算法 (K={best_k}, 距离度量={best_metric}) 的混淆矩阵')
plot_confusion_matrix(y_test, sklearn_pred, f'sklearn KNN算法 (K={best_k}, 距离度量={best_metric}) 的混淆矩阵')

# Plot decision boundaries over the first two standardized features
print("\n6. 可视化决策边界...")
plot_decision_boundary(X_scaled, y, final_knn, f'自定义KNN算法 (K={best_k}, 距离度量={best_metric}) 的决策边界')
plot_decision_boundary(X_scaled, y, sklearn_knn, f'sklearn KNN算法 (K={best_k}, 距离度量={best_metric}) 的决策边界')
