import time

import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.model_selection import train_test_split, cross_val_score, KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler, LabelEncoder

# Configure matplotlib fonts so Chinese axis labels and titles render correctly.
try:
    from matplotlib.font_manager import FontProperties, findfont, FontManager
    import matplotlib.font_manager as fm

    # Scan installed font files for names that suggest CJK coverage.
    cjk_keywords = ['hei', 'song', 'kai', 'yahei', 'microsoft', 'sim']
    candidates = [
        path for path in fm.findSystemFonts()
        if any(keyword in path for keyword in cjk_keywords)
    ]

    if candidates:
        # Use the first match as the preferred sans-serif face.
        plt.rcParams['font.family'] = ['sans-serif']
        font_name = FontProperties(fname=candidates[0]).get_name()
        plt.rcParams['font.sans-serif'] = [font_name]
        plt.rcParams['axes.unicode_minus'] = False
        print(f"已找到并使用中文字体: {font_name}")
    else:
        print("未找到中文字体，使用英文显示图表")
        plt.rcParams['font.family'] = ['sans-serif']
        plt.rcParams['axes.unicode_minus'] = False

except Exception as e:
    # Best-effort only: fall back to matplotlib defaults on any failure.
    print(f"字体设置出错: {e}")
    print("使用默认字体设置")
    plt.rcParams['font.family'] = ['sans-serif']
    plt.rcParams['axes.unicode_minus'] = False

# Load the iris dataset (150 samples, 4 features, 3 classes).
iris = load_iris()
X = iris.data            # feature matrix, shape (n_samples, n_features)
y = iris.target          # integer class labels
feature_names = iris.feature_names
target_names = iris.target_names

print("数据集信息:")
print(f"样本数量: {X.shape[0]}")
print(f"特征数量: {X.shape[1]}")
print(f"特征名称: {feature_names}")
print(f"类别数量: {len(np.unique(y))}")
print(f"类别名称: {target_names}")

# Standardize features (zero mean, unit variance) so that no single feature
# dominates the distance computations KNN relies on.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Hold out 30% as a test set; the fixed random_state makes the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.3, random_state=42)


class MyKNN:
    """A minimal k-nearest-neighbors classifier with several distance metrics.

    Parameters
    ----------
    k : int
        Number of neighbors used in the majority vote.
    metric : str
        One of 'euclidean', 'manhattan', 'chebyshev', 'minkowski',
        'cosine', or 'hamming' (case-insensitive).
    p : int or float
        Order of the Minkowski distance; only used when metric='minkowski'.
        Default 3 matches the previously hard-coded value.
    """

    def __init__(self, k=5, metric='euclidean', p=3):
        self.k = k
        self.metric = metric.lower()
        self.p = p

    def fit(self, X, y):
        """Memorize the training data (KNN is a lazy learner; no work here)."""
        self.X_train = X
        self.y_train = y
        return self

    def predict(self, X):
        """Predict an integer label for every row of the 2-D array X.

        Raises
        ------
        ValueError
            If called before fit().
        """
        if not hasattr(self, 'X_train') or not hasattr(self, 'y_train'):
            raise ValueError("模型尚未训练，请先调用fit方法")

        y_pred = np.zeros(X.shape[0], dtype=int)
        for i, x in enumerate(X):
            y_pred[i] = self._predict(x)
        return y_pred

    def _predict(self, x):
        """Predict the label of a single sample by a k-nearest majority vote."""
        # Vectorized distance from x to every training sample.
        if self.metric == 'euclidean':
            distances = np.sqrt(np.sum((self.X_train - x) ** 2, axis=1))
        elif self.metric == 'manhattan':
            distances = np.sum(np.abs(self.X_train - x), axis=1)
        elif self.metric == 'chebyshev':
            distances = np.max(np.abs(self.X_train - x), axis=1)
        elif self.metric == 'minkowski':
            distances = np.sum(np.abs(self.X_train - x) ** self.p, axis=1) ** (1 / self.p)
        elif self.metric == 'cosine':
            # Cosine distance = 1 - cosine similarity.
            norm_train = np.linalg.norm(self.X_train, axis=1)
            norm_x = np.linalg.norm(x)
            if norm_x == 0:
                # A zero query vector has no direction; treat all points as equally far.
                distances = np.ones(self.X_train.shape[0])
            else:
                cosine_similarity = np.dot(self.X_train, x) / (norm_train * norm_x)
                distances = 1 - cosine_similarity
        elif self.metric == 'hamming':
            # Hamming distance over binarized features (value > 0 -> 1, else 0),
            # normalized by the feature count.
            binarized_train = (self.X_train > 0).astype(int)
            binarized_x = (x > 0).astype(int)
            # BUG FIX: previously divided by the *global* X's column count,
            # which breaks when the class is used standalone; use the stored
            # training data's own feature count.
            distances = np.sum(binarized_train != binarized_x, axis=1) / self.X_train.shape[1]
        else:
            raise ValueError(f"不支持的距离度量: {self.metric}")

        # Indices of the k smallest distances (argsort breaks ties by index order).
        k_indices = np.argsort(distances)[:self.k]
        k_nearest_labels = self.y_train[k_indices]
        # Majority vote; on a tie, bincount/argmax favors the smallest label.
        return np.bincount(k_nearest_labels).argmax()


# Hyperparameter search: choose K by cross-validated accuracy.
def find_best_k(X_train, y_train, max_k=20, metric='euclidean', cv=5):
    """Select the K in [1, max_k] with the highest cv-fold cross-validated accuracy.

    Returns a 5-tuple: (best_k, best_accuracy, mean_accuracies,
    std_accuracies, all_accuracies), where the last three are per-K lists.
    """
    print(f"\n使用{metric}距离度量寻找最优K值 (交叉验证折数={cv})...")

    # Shuffled K-fold splitter with a fixed seed for reproducibility.
    splitter = KFold(n_splits=cv, shuffle=True, random_state=42)

    mean_accuracies, std_accuracies, all_accuracies = [], [], []

    for k in range(1, max_k + 1):
        fold_scores = []
        for train_idx, val_idx in splitter.split(X_train):
            model = MyKNN(k=k, metric=metric)
            model.fit(X_train[train_idx], y_train[train_idx])
            predictions = model.predict(X_train[val_idx])
            fold_scores.append(accuracy_score(y_train[val_idx], predictions))

        mean_accuracy, std_accuracy = np.mean(fold_scores), np.std(fold_scores)
        mean_accuracies.append(mean_accuracy)
        std_accuracies.append(std_accuracy)
        all_accuracies.append(fold_scores)

        print(f"K={k:2d}, 平均准确率={mean_accuracy:.4f} ± {std_accuracy:.4f}")

    # argmax over the per-K means; K values are 1-based.
    best_idx = int(np.argmax(mean_accuracies))
    best_k = best_idx + 1
    best_accuracy = mean_accuracies[best_idx]

    print(f"\n最优K值: {best_k}, 平均准确率: {best_accuracy:.4f}")

    # Plot the accuracy curve with std-dev error bars and mark the chosen K.
    k_values = range(1, max_k + 1)
    plt.figure(figsize=(12, 6))
    plt.errorbar(k_values, mean_accuracies, yerr=std_accuracies,
                 fmt='-o', ecolor='lightgray', capsize=5)
    plt.axvline(x=best_k, color='r', linestyle='--', alpha=0.7,
                label=f'最优K值 ({best_k})')
    plt.title(f'不同K值的准确率 ({metric}距离) - 交叉验证结果')
    plt.xlabel('K值')
    plt.ylabel('平均准确率')
    plt.xticks(k_values)
    plt.grid(True, alpha=0.3)
    plt.legend()
    plt.tight_layout()
    plt.show()

    return best_k, best_accuracy, mean_accuracies, std_accuracies, all_accuracies


# Evaluate every supported distance metric and record its best K and accuracy.
distance_metrics = ['euclidean', 'manhattan', 'chebyshev', 'minkowski', 'cosine', 'hamming']
best_ks = {}
best_accuracies = {}
all_results = {}

for metric in distance_metrics:
    try:
        best_k, best_accuracy, mean_acc, std_acc, all_acc = find_best_k(
            X_train, y_train, max_k=20, metric=metric, cv=5
        )
    except Exception as e:
        # A failing metric is recorded as None and excluded from comparison.
        print(f"距离度量 {metric} 执行失败: {e}")
        best_ks[metric] = None
        best_accuracies[metric] = None
        all_results[metric] = None
    else:
        best_ks[metric] = best_k
        best_accuracies[metric] = best_accuracy
        all_results[metric] = {
            'mean_acc': mean_acc,
            'std_acc': std_acc,
            'all_acc': all_acc,
        }

# Tabulate the best (K, accuracy) pair found per metric.
print("\n不同距离度量的最优结果比较:")
print("{:<15} {:<10} {:<15}".format("距离度量", "最优K值", "准确率"))
print("-" * 40)
for metric in distance_metrics:
    if best_ks[metric] is not None:
        print("{:<15} {:<10} {:<15.4f}".format(metric, best_ks[metric], best_accuracies[metric]))

# Pick the metric whose best K achieved the highest cross-validated accuracy;
# fall back to euclidean/K=5 if every metric failed.
valid_metrics = [m for m in distance_metrics if best_accuracies[m] is not None]
if valid_metrics:
    best_metric = max(valid_metrics, key=lambda m: best_accuracies[m])
    best_k = best_ks[best_metric]
    print(f"\n总体最优: {best_metric}距离, K={best_k}, 准确率={best_accuracies[best_metric]:.4f}")
else:
    print("\n所有距离度量均失败，使用默认欧氏距离和K=5")
    best_metric = 'euclidean'
    best_k = 5

# Bar chart comparing each metric's best cross-validated accuracy
# (each bar evaluated at that metric's own optimal K).
plt.figure(figsize=(14, 8))
metrics = [m for m in distance_metrics if best_accuracies[m] is not None]
accuracies = [best_accuracies[m] for m in metrics]
optimal_ks = [best_ks[m] for m in metrics]

colors = plt.cm.viridis(np.linspace(0, 1, len(metrics)))
bars = plt.bar(metrics, accuracies, color=colors)

plt.title('不同距离度量在最优K值下的准确率对比')
plt.xlabel('距离度量')
plt.ylabel('准确率')
plt.ylim(min(accuracies) - 0.05, max(accuracies) + 0.05)

# Annotate each bar with its accuracy value and the K that produced it.
for bar, accuracy, k in zip(bars, accuracies, optimal_ks):
    height = bar.get_height()
    plt.text(bar.get_x() + bar.get_width() / 2., height + 0.005,
             f'{accuracy:.4f}\nK={k}', ha='center', va='bottom')

plt.grid(axis='y', linestyle='--', alpha=0.7)
plt.tight_layout()
plt.show()

# Accuracy-vs-K curves for all metrics, each with a ±1 std-dev shaded band.
plt.figure(figsize=(14, 10))
max_k = 20  # must match the max_k passed to find_best_k above

for metric in distance_metrics:
    if all_results[metric] is not None:
        plt.plot(range(1, max_k + 1), all_results[metric]['mean_acc'],
                 marker='o', linewidth=2, label=metric)
        plt.fill_between(range(1, max_k + 1),
                         np.array(all_results[metric]['mean_acc']) - np.array(all_results[metric]['std_acc']),
                         np.array(all_results[metric]['mean_acc']) + np.array(all_results[metric]['std_acc']),
                         alpha=0.2)

plt.title('不同距离度量下K值与准确率的关系')
plt.xlabel('K值')
plt.ylabel('平均准确率')
plt.legend()
plt.grid(True)
plt.tight_layout()
plt.show()

# Final evaluation: retrain MyKNN with the winning (metric, K) pair on the
# full training split and score it on the held-out test set.
print("\n使用最优参数的详细评估:")
knn = MyKNN(k=best_k, metric=best_metric)
knn.fit(X_train, y_train)

# Time only the prediction step (fit is just a data copy for KNN).
start_time = time.time()
y_pred = knn.predict(X_test)
my_knn_time = time.time() - start_time

accuracy = accuracy_score(y_test, y_pred)
print(f"我的KNN实现 - {best_metric}距离, K={best_k}")
print(f"测试集准确率: {accuracy:.4f}")
print("分类报告:")
print(classification_report(y_test, y_pred, target_names=target_names))
print(f"预测时间: {my_knn_time:.6f}秒")

# Confusion matrix heatmap over the test predictions.
cm = confusion_matrix(y_test, y_pred)
plt.figure(figsize=(10, 8))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=target_names, yticklabels=target_names)
plt.title(f'混淆矩阵 ({best_metric}距离, K={best_k})')
plt.xlabel('预测标签')
plt.ylabel('真实标签')
plt.tight_layout()
plt.show()

# Benchmark against sklearn's KNeighborsClassifier with the same settings.
print("\n与sklearn的KNN对比:")
try:
    # Only 'minkowski' needs an extra parameter (order p=3, matching MyKNN).
    # 'cosine' and 'hamming' are accepted by sklearn under the same metric
    # names, so the previous byte-identical special-case branches are gone.
    # (Note: sklearn's hamming works on the raw values, whereas MyKNN
    # binarizes at 0 first, so those results can legitimately differ.)
    if best_metric == 'minkowski':
        sklearn_knn = KNeighborsClassifier(n_neighbors=best_k, metric=best_metric, p=3)
    else:
        sklearn_knn = KNeighborsClassifier(n_neighbors=best_k, metric=best_metric)

    sklearn_knn.fit(X_train, y_train)

    # Time prediction only, mirroring how my_knn_time was measured above.
    # (Previously fit() sat inside the timed region, biasing the speed ratio.)
    start_time = time.time()
    y_pred_sklearn = sklearn_knn.predict(X_test)
    sklearn_time = time.time() - start_time

    sklearn_accuracy = accuracy_score(y_test, y_pred_sklearn)
    print(f"sklearn KNN - {best_metric}距离, K={best_k}")
    print(f"测试集准确率: {sklearn_accuracy:.4f}")
    print("分类报告:")
    print(classification_report(y_test, y_pred_sklearn, target_names=target_names))
    print(f"预测时间: {sklearn_time:.6f}秒")
    # A ZeroDivisionError here (timer resolution) falls through to the except.
    print(f"速度比 (我的实现/sklearn): {my_knn_time / sklearn_time:.2f}x")

    # Side-by-side bar charts: accuracy and prediction time.
    plt.figure(figsize=(10, 6))
    models = ['我的KNN', 'sklearn KNN']
    accuracies = [accuracy, sklearn_accuracy]
    times = [my_knn_time, sklearn_time]

    # Accuracy comparison.
    plt.subplot(1, 2, 1)
    plt.bar(models, accuracies, color=['skyblue', 'lightgreen'])
    plt.ylim(0.8, 1.0)
    plt.title('准确率对比')
    plt.ylabel('准确率')
    for i, v in enumerate(accuracies):
        plt.text(i, v + 0.01, f"{v:.4f}", ha='center')

    # Prediction-time comparison.
    plt.subplot(1, 2, 2)
    plt.bar(models, times, color=['salmon', 'gold'])
    plt.title('预测时间对比')
    plt.ylabel('时间(秒)')
    for i, v in enumerate(times):
        plt.text(i, v + 0.001, f"{v:.6f}", ha='center')

    plt.suptitle(f'KNN模型对比 ({best_metric}距离, K={best_k})')
    plt.tight_layout()
    plt.show()

except Exception as e:
    print(f"sklearn KNN对比失败: {e}")

# Explained-variance analysis via PCA on the standardized features.
# NOTE(review): this requires `from sklearn.decomposition import PCA` at the
# top of the file — without that import this line raises NameError.
print("\n特征重要性分析:")
pca = PCA()
pca.fit(X_scaled)
explained_variance = pca.explained_variance_ratio_

plt.figure(figsize=(12, 6))
plt.bar(range(len(explained_variance)), explained_variance, alpha=0.7)
# Cumulative explained variance overlaid as a red line.
plt.plot(range(len(explained_variance)), np.cumsum(explained_variance),
         'r-o', linewidth=2)
plt.axhline(y=0.95, color='g', linestyle='--', label='95%方差')
plt.title('特征方差解释率')
plt.xlabel('主成分')
plt.ylabel('解释方差比例')
# NOTE(review): the x-axis shows principal components, yet the tick labels
# reuse the original feature names — components are not features; confirm intent.
plt.xticks(range(len(explained_variance)), feature_names)
plt.legend()
plt.grid(alpha=0.3)
plt.tight_layout()
plt.show()

# Decision-boundary visualization in the 2-D PCA plane.
if best_metric != 'hamming':  # Hamming distance is not meaningful for continuous features
    print("\n决策边界可视化 (使用前两个主成分):")

    # Project the standardized data onto its first two principal components.
    pca = PCA(n_components=2)
    X_pca = pca.fit_transform(X_scaled)
    # Same test_size/random_state as the main split, so the same sample
    # indices land in train vs. test as before.
    X_train_pca, X_test_pca, y_train_pca, y_test_pca = train_test_split(
        X_pca, y, test_size=0.3, random_state=42
    )

    # Fit a KNN on the 2-D projection using the selected metric and K.
    knn_pca = MyKNN(k=best_k, metric=best_metric)
    knn_pca.fit(X_train_pca, y_train_pca)

    # Build a dense grid covering the projected data with a 1-unit margin.
    h = 0.02  # grid step
    x_min, x_max = X_pca[:, 0].min() - 1, X_pca[:, 0].max() + 1
    y_min, y_max = X_pca[:, 1].min() - 1, X_pca[:, 1].max() + 1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                         np.arange(y_min, y_max, h))

    # Classify every grid point to obtain the decision regions.
    Z = knn_pca.predict(np.c_[xx.ravel(), yy.ravel()])
    Z = Z.reshape(xx.shape)

    # Plot the regions plus the actual train/test points.
    plt.figure(figsize=(12, 10))
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])  # region fill
    cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])   # sample points

    plt.contourf(xx, yy, Z, cmap=cmap_light, alpha=0.8)

    # Training points: small circles with black edges.
    plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1], c=y_train_pca,
                cmap=cmap_bold, edgecolor='k', s=40, label='训练集')

    # Test points: larger squares with white edges.
    plt.scatter(X_test_pca[:, 0], X_test_pca[:, 1], c=y_test_pca,
                cmap=cmap_bold, edgecolor='w', s=80, marker='s', label='测试集')

    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.title(f'KNN决策边界 ({best_metric}距离, K={best_k})')
    plt.xlabel('主成分1')
    plt.ylabel('主成分2')
    plt.legend()
    plt.tight_layout()
    plt.show()
else:
    print("汉明距离不适用于决策边界可视化，已跳过")

print("\n分析完成!")