"""
KNN分类任务完整示例
使用鸢尾花数据集演示KNN分类算法的应用
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import load_iris, make_classification
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
from sklearn.pipeline import Pipeline
import warnings
# NOTE(review): this silences *all* warnings globally (sklearn deprecations,
# convergence notices, etc.) — acceptable for a demo, but consider a narrower
# filter in production code.
warnings.filterwarnings('ignore')

# Configure matplotlib for CJK output: SimHei renders the Chinese labels used
# below, and disabling unicode_minus keeps minus signs visible with that font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class KNNClassificationDemo:
    """End-to-end KNN classification demo.

    Covers data loading, train/test preparation, model training, evaluation,
    cross-validation, hyperparameter tuning, and visualization.
    """

    def __init__(self):
        # Fitted estimator; set by train_model() or hyperparameter_tuning().
        self.model = None
        # Scaler fitted on the training split; reused to transform test data.
        self.scaler = StandardScaler()
        self.X_train = None
        self.X_test = None
        self.y_train = None
        self.y_test = None

    def load_data(self, dataset_type='iris'):
        """Load a dataset for the demo.

        Args:
            dataset_type: 'iris' for the iris dataset, or 'synthetic' for a
                2-feature generated binary-classification problem.

        Returns:
            Tuple of (X, y, feature_names, target_names).

        Raises:
            ValueError: if dataset_type is not 'iris' or 'synthetic'.
        """
        if dataset_type == 'iris':
            data = load_iris()
            X, y = data.data, data.target
            feature_names = data.feature_names
            target_names = data.target_names
            print("使用鸢尾花数据集")
            print(f"特征数量: {X.shape[1]}")
            print(f"样本数量: {X.shape[0]}")
            print(f"类别数量: {len(np.unique(y))}")
            print(f"类别名称: {target_names}")
        elif dataset_type == 'synthetic':
            # 2 informative features, no redundant ones -> plottable in 2D.
            X, y = make_classification(
                n_samples=1000,
                n_features=2,
                n_redundant=0,
                n_informative=2,
                n_clusters_per_class=1,
                random_state=42
            )
            feature_names = ['Feature 1', 'Feature 2']
            target_names = ['Class 0', 'Class 1']
            print("使用合成数据集")
            print(f"特征数量: {X.shape[1]}")
            print(f"样本数量: {X.shape[0]}")
            print(f"类别数量: {len(np.unique(y))}")
        else:
            # BUG FIX: previously fell through to `return` with unbound
            # locals, raising UnboundLocalError; fail fast with a clear error.
            raise ValueError(f"Unknown dataset_type: {dataset_type!r}")

        return X, y, feature_names, target_names

    def prepare_data(self, X, y, test_size=0.3, random_state=42):
        """Split into train/test sets (stratified) and standardize features.

        Stores raw and scaled splits on the instance; the scaler is fitted on
        the training split only, then applied to the test split.
        """
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state, stratify=y
        )

        # Fit on train only to avoid leaking test statistics into scaling.
        self.X_train_scaled = self.scaler.fit_transform(self.X_train)
        self.X_test_scaled = self.scaler.transform(self.X_test)

        print(f"训练集大小: {self.X_train.shape}")
        print(f"测试集大小: {self.X_test.shape}")

    def train_model(self, k=5, weights='uniform', metric='minkowski', p=2):
        """Train a KNN classifier on the scaled training data.

        Args:
            k: number of neighbors.
            weights: 'uniform' or 'distance'.
            metric: distance metric name.
            p: Minkowski power parameter (2 == Euclidean).
        """
        self.model = KNeighborsClassifier(
            n_neighbors=k,
            weights=weights,
            metric=metric,
            p=p
        )

        self.model.fit(self.X_train_scaled, self.y_train)
        print(f"KNN模型训练完成 (k={k}, weights={weights}, metric={metric})")

    def evaluate_model(self):
        """Evaluate the trained model on the held-out test set.

        Returns:
            Tuple of (accuracy, predicted labels, confusion matrix).
        """
        y_pred = self.model.predict(self.X_test_scaled)

        accuracy = accuracy_score(self.y_test, y_pred)
        print(f"测试集准确率: {accuracy:.4f}")

        print("\n分类报告:")
        print(classification_report(self.y_test, y_pred))

        cm = confusion_matrix(self.y_test, y_pred)
        print("\n混淆矩阵:")
        print(cm)

        return accuracy, y_pred, cm

    def cross_validation(self, cv=5):
        """Run cv-fold cross-validation on the training split.

        Returns:
            Array of per-fold accuracy scores.
        """
        scores = cross_val_score(self.model, self.X_train_scaled, self.y_train, cv=cv)
        print(f"\n{cv}折交叉验证结果:")
        # mean +/- 2*std approximates a 95% interval for the fold scores.
        print(f"平均准确率: {scores.mean():.4f} (+/- {scores.std() * 2:.4f})")
        return scores

    def hyperparameter_tuning(self):
        """Grid-search k, weights, and metric; adopt the best estimator.

        Replaces self.model with the refit best estimator.

        Returns:
            Tuple of (best parameter dict, best cross-validation score).
        """
        param_grid = {
            'n_neighbors': range(1, 21),
            'weights': ['uniform', 'distance'],
            'metric': ['euclidean', 'manhattan', 'minkowski']
        }

        grid_search = GridSearchCV(
            KNeighborsClassifier(),
            param_grid,
            cv=5,
            scoring='accuracy',
            n_jobs=-1
        )

        grid_search.fit(self.X_train_scaled, self.y_train)

        print("\n超参数调优结果:")
        print(f"最佳参数: {grid_search.best_params_}")
        print(f"最佳交叉验证分数: {grid_search.best_score_:.4f}")

        # GridSearchCV refits the best estimator on the full training split.
        self.model = grid_search.best_estimator_

        return grid_search.best_params_, grid_search.best_score_

    def plot_decision_boundary(self, X, y, feature_names, target_names):
        """Plot the KNN decision boundary and a K-vs-accuracy curve.

        Only applicable to 2-D feature data; prints a notice and returns
        otherwise. Trains a throwaway k=5 model on the given data — it does
        not touch self.model.
        """
        if X.shape[1] != 2:
            print("决策边界可视化仅适用于2维数据")
            return

        # BUG FIX: use a *local* scaler. The original re-fit self.scaler here,
        # clobbering the scaler fitted on the training split and breaking any
        # later transform of the held-out test data.
        scaler = StandardScaler()
        X_scaled = scaler.fit_transform(X)

        model = KNeighborsClassifier(n_neighbors=5)
        model.fit(X_scaled, y)

        # Build a dense grid covering the data (with a 1-unit margin) and
        # classify every grid point to shade the decision regions.
        h = 0.02
        x_min, x_max = X_scaled[:, 0].min() - 1, X_scaled[:, 0].max() + 1
        y_min, y_max = X_scaled[:, 1].min() - 1, X_scaled[:, 1].max() + 1
        xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                            np.arange(y_min, y_max, h))

        Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)

        plt.figure(figsize=(12, 5))

        # Left panel: decision regions with the samples overlaid.
        plt.subplot(1, 2, 1)
        plt.contourf(xx, yy, Z, alpha=0.8, cmap=plt.cm.RdYlBu)
        scatter = plt.scatter(X_scaled[:, 0], X_scaled[:, 1], c=y, cmap=plt.cm.RdYlBu, edgecolors='black')
        plt.xlabel(feature_names[0])
        plt.ylabel(feature_names[1])
        plt.title('KNN决策边界')
        plt.colorbar(scatter)

        # Right panel: cross-validated accuracy for several K values.
        plt.subplot(1, 2, 2)
        k_values = [1, 3, 5, 15]
        accuracies = []

        for k in k_values:
            knn = KNeighborsClassifier(n_neighbors=k)
            scores = cross_val_score(knn, X_scaled, y, cv=5)
            accuracies.append(scores.mean())

        plt.plot(k_values, accuracies, 'bo-')
        plt.xlabel('K值')
        plt.ylabel('交叉验证准确率')
        plt.title('不同K值的性能比较')
        plt.grid(True)

        plt.tight_layout()
        plt.show()

    def plot_confusion_matrix(self, cm, target_names):
        """Render a confusion matrix as an annotated heatmap."""
        plt.figure(figsize=(8, 6))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                   xticklabels=target_names, yticklabels=target_names)
        plt.title('混淆矩阵')
        plt.xlabel('预测标签')
        plt.ylabel('真实标签')
        plt.show()

def main():
    """Run the complete KNN classification walkthrough.

    Steps: load data, preprocess, train a baseline model, evaluate,
    cross-validate, tune hyperparameters, re-evaluate, visualize results,
    demo the decision boundary on 2-D synthetic data, and print usage notes.
    """
    print("=" * 60)
    print("KNN分类任务完整示例")
    print("=" * 60)

    knn_demo = KNNClassificationDemo()

    # 1. Load the iris dataset.
    print("\n1. 加载数据集")
    X, y, feature_names, target_names = knn_demo.load_data('iris')

    # 2. Split and standardize.
    print("\n2. 数据预处理")
    knn_demo.prepare_data(X, y)

    # 3. Train a baseline model with k=5.
    print("\n3. 训练KNN模型")
    knn_demo.train_model(k=5)

    # 4. Evaluate the baseline on the test split.
    print("\n4. 模型评估")
    accuracy, y_pred, cm = knn_demo.evaluate_model()

    # 5. Cross-validate the baseline on the training split.
    print("\n5. 交叉验证")
    cv_scores = knn_demo.cross_validation()

    # 6. Grid-search better hyperparameters (replaces the model).
    print("\n6. 超参数调优")
    best_params, best_score = knn_demo.hyperparameter_tuning()

    # 7. Re-evaluate with the tuned model.
    print("\n7. 使用最佳参数的最终评估")
    final_accuracy, final_y_pred, final_cm = knn_demo.evaluate_model()

    # 8. Confusion-matrix heatmap for the tuned model.
    print("\n8. 结果可视化")
    knn_demo.plot_confusion_matrix(final_cm, target_names)

    # 9. Decision boundary demo requires 2-D data, so use the synthetic set.
    print("\n9. 决策边界可视化（使用2D合成数据）")
    X_2d, y_2d, feature_names_2d, target_names_2d = knn_demo.load_data('synthetic')
    knn_demo.plot_decision_boundary(X_2d, y_2d, feature_names_2d, target_names_2d)

    # 10. Print a minimal copy-paste usage example.
    print("\n10. API调用示例")
    print("=" * 40)

    print("基本KNN分类器用法:")
    print("```python")
    print("from sklearn.neighbors import KNeighborsClassifier")
    print("from sklearn.preprocessing import StandardScaler")
    print("")
    print("# 创建并训练模型")
    print("scaler = StandardScaler()")
    print("X_scaled = scaler.fit_transform(X_train)")
    print("knn = KNeighborsClassifier(n_neighbors=5)")
    print("knn.fit(X_scaled, y_train)")
    print("")
    print("# 预测")
    print("X_test_scaled = scaler.transform(X_test)")
    print("predictions = knn.predict(X_test_scaled)")
    print("probabilities = knn.predict_proba(X_test_scaled)")
    print("```")

    # Fixed: was an f-string with no placeholders (lint F541); output unchanged.
    print("\n最终模型性能总结:")
    print(f"- 最佳参数: {best_params}")
    print(f"- 交叉验证分数: {best_score:.4f}")
    print(f"- 测试集准确率: {final_accuracy:.4f}")
# Script entry point: run the full demo only when executed directly,
# not when imported as a module.
if __name__ == "__main__":
    main()
