import numpy as np
import matplotlib.pyplot as plt
from collections import Counter
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import (accuracy_score, confusion_matrix, classification_report,
                             precision_recall_curve, roc_curve, auc, RocCurveDisplay, PrecisionRecallDisplay)
from sklearn.preprocessing import label_binarize
from sklearn.base import BaseEstimator, ClassifierMixin
import time
import os

# Use matplotlib's non-interactive Agg backend so figures are written to
# disk (plt.savefig) instead of being displayed in a window.
plt.switch_backend('Agg')


# Enhanced KNN implementation with probability prediction support
class EnhancedKNN(BaseEstimator, ClassifierMixin):
    """Brute-force k-nearest-neighbors classifier with ``predict_proba``.

    Implements the minimal scikit-learn estimator API (``fit``,
    ``predict``, ``get_params``, ``set_params``) so it can be used with
    ``GridSearchCV``.

    Parameters
    ----------
    n_neighbors : int, default=5
        Number of neighbors used for majority voting.
    """

    def __init__(self, n_neighbors=5):
        self.n_neighbors = n_neighbors

    def fit(self, X, y):
        """Store the training data (KNN is a lazy learner: no training step)."""
        self.X_train = np.asarray(X)
        self.y_train = np.asarray(y)
        self.classes_ = np.unique(y)
        return self

    def _kneighbor_labels(self, X_test):
        """Return an (n_test, k) array of labels of the k nearest neighbors."""
        X_test = np.asarray(X_test)
        # Vectorized Euclidean distance matrix of shape (n_test, n_train).
        distances = np.sqrt(np.sum((self.X_train - X_test[:, np.newaxis]) ** 2, axis=2))
        k = self.n_neighbors
        # kth = k - 1 guarantees the first k columns are the k smallest
        # distances, and (unlike kth = k) remains valid in the edge case
        # where k equals the number of training samples.
        k_indices = np.argpartition(distances, k - 1, axis=1)[:, :k]
        return self.y_train[k_indices]

    def predict(self, X_test):
        """Predict labels by majority vote among the k nearest neighbors.

        Ties are broken by the first label encountered among the neighbors
        (``Counter.most_common`` ordering), matching the original behavior.
        """
        k_nearest_labels = self._kneighbor_labels(X_test)
        return np.array([Counter(row).most_common(1)[0][0] for row in k_nearest_labels])

    def predict_proba(self, X_test):
        """Return class probabilities (neighbor vote fractions) for ROC/PR curves.

        Columns follow ``self.classes_`` order. Labels are mapped to class
        indices via ``searchsorted`` so the result is correct even for
        non-contiguous label values, not only 0..n_classes-1.
        """
        k_nearest_labels = self._kneighbor_labels(X_test)
        proba = np.zeros((len(k_nearest_labels), len(self.classes_)))
        for i, row in enumerate(k_nearest_labels):
            # Map raw labels to their positions in classes_ before counting.
            idx = np.searchsorted(self.classes_, row)
            counts = np.bincount(idx, minlength=len(self.classes_))
            proba[i] = counts / counts.sum()
        return proba

    # Methods required by the scikit-learn estimator API (used when CV clones us).
    def get_params(self, deep=True):
        return {"n_neighbors": self.n_neighbors}

    def set_params(self, **params):
        for key, value in params.items():
            setattr(self, key, value)
        return self


# Main function
def main():
    """Run the complete KNN experiment on the iris dataset.

    Loads the data, makes a stratified train/test split, tunes
    ``n_neighbors`` via 5-fold cross-validated grid search, evaluates the
    best model on the test set, and saves per-class plus macro-average
    ROC and PR curves under ``results/``.

    Returns:
        dict: best k, CV accuracy, test accuracy, confusion matrix,
        classification report, per-class + macro ROC AUC and AP values.
    """
    # 1. Load the iris dataset
    data = load_iris()
    X, y = data.data, data.target
    target_names = data.target_names
    n_classes = len(target_names)

    print("=" * 80)
    print("鸢尾花数据集信息:")
    print(f"样本数: {X.shape[0]}, 特征数: {X.shape[1]}")

    # Class distribution converted to plain ints for readable printing
    unique_classes, class_counts = np.unique(y, return_counts=True)
    class_distribution = {int(cls): int(count) for cls, count in zip(unique_classes, class_counts)}
    print(f"类别分布: {class_distribution}")

    print(f"目标类别: {target_names}")
    print("=" * 80)

    # 2. Train/test split; stratify preserves class proportions in both sets
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42, stratify=y
    )
    print(f"训练集大小: {X_train.shape[0]}, 测试集大小: {X_test.shape[0]}")

    # 3. Hyperparameter search over odd k values (odd k reduces vote ties)
    print("\n开始超参数优化...")
    knn = EnhancedKNN()
    param_grid = {'n_neighbors': list(range(1, 16, 2))}

    grid_search = GridSearchCV(
        knn,
        param_grid,
        cv=5,
        scoring='accuracy',
        return_train_score=True
    )

    start_time = time.time()
    grid_search.fit(X_train, y_train)
    search_time = time.time() - start_time

    print(f"超参数搜索完成! 耗时: {search_time:.2f}秒")
    print(f"最佳K值: {grid_search.best_params_['n_neighbors']}")
    print(f"交叉验证最佳准确率: {grid_search.best_score_:.4f}")

    # 4. Evaluate the best model on the held-out test set
    best_knn = grid_search.best_estimator_
    y_pred = best_knn.predict(X_test)
    test_accuracy = accuracy_score(y_test, y_pred)

    print("\n" + "=" * 80)
    print("最佳模型在测试集上的性能:")
    print(f"测试准确率: {test_accuracy:.4f}")

    # 5. Confusion matrix
    cm = confusion_matrix(y_test, y_pred)
    print("\n混淆矩阵:")
    print(cm)

    # 6. Per-class precision/recall/F1 report
    print("\n分类报告:")
    print(classification_report(y_test, y_pred, target_names=target_names))

    # 7. Output directory for the figures
    os.makedirs('results', exist_ok=True)

    # 8. One-vs-rest data for ROC and PR curves:
    # binarize the labels and get per-class probability scores.
    y_test_bin = label_binarize(y_test, classes=np.unique(y))
    y_score = best_knn.predict_proba(X_test)

    # Per-class ROC curves and AUC
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_test_bin[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])

    # Macro-average ROC: interpolate each class's TPR on the union of all
    # FPR points (fpr arrays are increasing, so np.interp is valid here).
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(n_classes):
        mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])

    # Per-class PR curves; trapezoidal AUPRC via auc(recall, precision)
    # (sklearn's auc accepts the decreasing recall order).
    precision = dict()
    recall = dict()
    average_precision = dict()
    for i in range(n_classes):
        precision[i], recall[i], _ = precision_recall_curve(y_test_bin[:, i], y_score[:, i])
        average_precision[i] = auc(recall[i], precision[i])

    # Macro-average PR curve.
    # BUGFIX: np.interp requires an increasing x-array, but
    # precision_recall_curve returns recall in DECREASING order, so the
    # previous interpolation of precision against recall (and of recall
    # against fpr) produced meaningless values. Interpolate precision on a
    # common increasing recall grid, reversing the per-class arrays first.
    recall_grid = np.linspace(0.0, 1.0, 101)
    mean_precision = np.zeros_like(recall_grid)
    for i in range(n_classes):
        mean_precision += np.interp(recall_grid, recall[i][::-1], precision[i][::-1])
    mean_precision /= n_classes
    precision["macro"] = mean_precision
    recall["macro"] = recall_grid
    average_precision["macro"] = auc(recall["macro"], precision["macro"])

    # 9. ROC curve figure
    plt.figure(figsize=(10, 8))
    colors = ['blue', 'red', 'green']

    # One curve per class
    for i, color in zip(range(n_classes), colors):
        plt.plot(fpr[i], tpr[i], color=color, lw=2,
                 label=f'ROC curve of {target_names[i]} (AUC = {roc_auc[i]:.2f})')

    # Macro-average curve
    plt.plot(fpr["macro"], tpr["macro"], color='navy', linestyle=':', linewidth=4,
             label=f'Macro-average ROC curve (AUC = {roc_auc["macro"]:.2f})')

    plt.plot([0, 1], [0, 1], 'k--', lw=2)  # chance diagonal
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic (ROC) Curves')
    plt.legend(loc="lower right")
    plt.grid(True)

    plt.savefig('results/roc_curve.png', dpi=300)
    plt.close()
    print("ROC曲线已保存为 'results/roc_curve.png'")

    # 10. PR curve figure
    plt.figure(figsize=(10, 8))

    # One curve per class
    for i, color in zip(range(n_classes), colors):
        plt.plot(recall[i], precision[i], color=color, lw=2,
                 label=f'PR curve of {target_names[i]} (AP = {average_precision[i]:.2f})')

    # Macro-average curve
    plt.plot(recall["macro"], precision["macro"], color='navy', linestyle=':', linewidth=4,
             label=f'Macro-average PR curve (AP = {average_precision["macro"]:.2f})')

    plt.xlabel('Recall')
    plt.ylabel('Precision')
    plt.ylim([0.0, 1.05])
    plt.xlim([0.0, 1.0])
    plt.title('Precision-Recall (PR) Curves')
    plt.legend(loc="best")
    plt.grid(True)

    plt.savefig('results/pr_curve.png', dpi=300)
    plt.close()
    print("PR曲线已保存为 'results/pr_curve.png'")

    # 11. Final summary
    print("\n实验完成! ROC和PR曲线已保存到 'results' 目录:")

    # Results returned for downstream reporting
    return {
        'best_k': grid_search.best_params_['n_neighbors'],
        'cv_accuracy': grid_search.best_score_,
        'test_accuracy': test_accuracy,
        'confusion_matrix': cm,
        'classification_report': classification_report(y_test, y_pred, target_names=target_names, output_dict=True),
        'roc_auc': roc_auc,
        'average_precision': average_precision
    }


# Script entry point: run the experiment only when executed directly,
# keeping the module importable without side effects.
if __name__ == "__main__":
    results = main()