"""
KNN算法综合对比和可视化
包含分类和回归任务的性能对比、参数影响分析、可视化展示
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_classification, make_regression, load_iris
from sklearn.model_selection import train_test_split, validation_curve, learning_curve
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, mean_squared_error, r2_score
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
import time
import warnings
warnings.filterwarnings('ignore')

# Configure plot style and CJK font support.
# NOTE: plt.style.use() must run FIRST — the seaborn style sheet sets
# font.sans-serif itself, so any font configured before it would be
# silently overwritten and Chinese labels would render as empty boxes.
plt.style.use('seaborn-v0_8')
plt.rcParams['font.sans-serif'] = ['SimHei']   # CJK-capable font for Chinese labels
plt.rcParams['axes.unicode_minus'] = False     # render minus signs with SimHei correctly

class KNNComprehensiveAnalysis:
    """Comprehensive KNN benchmark suite.

    Covers dataset generation, K-value and weight-scheme sweeps, distance
    metric comparison, head-to-head benchmarking against linear/tree/forest
    baselines, and learning/validation curve analysis. Plot titles and axis
    labels are intentionally kept in Chinese to match the configured font.
    """

    def __init__(self):
        # Single shared scaler; every analysis re-fits it on its own
        # training split before transforming the matching test split.
        self.scaler = StandardScaler()

    def _split_and_scale(self, X, y):
        """70/30 train/test split (seed 42) plus standardization.

        The scaler is fit on the training portion only so no test-set
        statistics leak into the transform. Returns
        (X_train_scaled, X_test_scaled, y_train, y_test).
        """
        X_train, X_test, y_train, y_test = train_test_split(
            X, y, test_size=0.3, random_state=42
        )
        X_train_scaled = self.scaler.fit_transform(X_train)
        X_test_scaled = self.scaler.transform(X_test)
        return X_train_scaled, X_test_scaled, y_train, y_test

    @staticmethod
    def _benchmark(models, score_func, X_train, X_test, y_train, y_test):
        """Fit and score every model, timing fit and predict separately.

        Returns (scores, times) where scores maps model name -> score and
        times maps model name -> {'train': seconds, 'predict': seconds}.
        """
        scores = {}
        times = {}
        for name, model in models.items():
            start = time.time()
            model.fit(X_train, y_train)
            fit_seconds = time.time() - start

            start = time.time()
            pred = model.predict(X_test)
            predict_seconds = time.time() - start

            scores[name] = score_func(y_test, pred)
            times[name] = {'train': fit_seconds, 'predict': predict_seconds}
        return scores, times

    def generate_datasets(self):
        """Generate the benchmark datasets.

        Returns a dict with three entries:
          'classification' -- 2-feature synthetic binary classification set
          'regression'     -- 2-feature synthetic regression set (noise=10)
          'iris'           -- the classic iris dataset, unmodified
        """
        datasets = {}

        # 1. Synthetic classification dataset
        X_cls, y_cls = make_classification(
            n_samples=1000, n_features=2, n_redundant=0,
            n_informative=2, n_clusters_per_class=1, random_state=42
        )
        datasets['classification'] = (X_cls, y_cls)

        # 2. Synthetic regression dataset
        X_reg, y_reg = make_regression(
            n_samples=1000, n_features=2, noise=10, random_state=42
        )
        datasets['regression'] = (X_reg, y_reg)

        # 3. Iris dataset
        iris = load_iris()
        datasets['iris'] = (iris.data, iris.target)

        return datasets

    def analyze_k_parameter(self, datasets):
        """Plot train/test scores versus K for the two synthetic datasets.

        Top row: K sweep for classification (accuracy) and regression (R²),
        showing the classic overfit-at-small-K / underfit-at-large-K trade-off.
        Bottom row: delegated to compare_weights (uniform vs distance).
        """
        fig, axes = plt.subplots(2, 2, figsize=(15, 12))

        k_range = range(1, 31)

        for name, (X, y) in datasets.items():
            if name == 'iris':
                continue  # the K sweep only uses the two synthetic sets

            X_train_scaled, X_test_scaled, y_train, y_test = \
                self._split_and_scale(X, y)

            train_scores = []
            test_scores = []

            if name == 'classification':
                model_class = KNeighborsClassifier
                score_func = accuracy_score
                ax = axes[0, 0]
                title = 'KNN分类 - K值影响'
                ylabel = '准确率'
            else:  # regression
                model_class = KNeighborsRegressor
                score_func = r2_score
                ax = axes[0, 1]
                title = 'KNN回归 - K值影响'
                ylabel = 'R² 分数'

            for k in k_range:
                model = model_class(n_neighbors=k)
                model.fit(X_train_scaled, y_train)

                train_pred = model.predict(X_train_scaled)
                test_pred = model.predict(X_test_scaled)

                train_scores.append(score_func(y_train, train_pred))
                test_scores.append(score_func(y_test, test_pred))

            ax.plot(k_range, train_scores, 'o-', label='训练集', markersize=4)
            ax.plot(k_range, test_scores, 's-', label='测试集', markersize=4)
            ax.set_xlabel('K值')
            ax.set_ylabel(ylabel)
            ax.set_title(title)
            ax.legend()
            ax.grid(True, alpha=0.3)

        # Bottom row: weighting-scheme comparison on the same datasets.
        self.compare_weights(datasets, axes[1, :])

        plt.tight_layout()
        plt.show()

    def compare_weights(self, datasets, axes):
        """Compare 'uniform' vs 'distance' neighbor weighting.

        For each synthetic dataset, evaluates both weighting schemes at a
        handful of K values and draws grouped bars into the two axes
        provided (axes[0] for classification, axes[1] for regression).
        """
        weights = ['uniform', 'distance']
        k_values = [3, 5, 7, 10, 15]

        for name, (X, y) in datasets.items():
            if name == 'iris':
                continue  # iris is not part of this comparison

            X_train_scaled, X_test_scaled, y_train, y_test = \
                self._split_and_scale(X, y)

            results = []

            if name == 'classification':
                model_class = KNeighborsClassifier
                score_func = accuracy_score
                ax = axes[0]
                title = 'KNN分类 - 权重方式比较'
                ylabel = '准确率'
            else:  # regression
                model_class = KNeighborsRegressor
                score_func = r2_score
                ax = axes[1]
                title = 'KNN回归 - 权重方式比较'
                ylabel = 'R² 分数'

            for weight in weights:
                scores = []
                for k in k_values:
                    model = model_class(n_neighbors=k, weights=weight)
                    model.fit(X_train_scaled, y_train)
                    pred = model.predict(X_test_scaled)
                    scores.append(score_func(y_test, pred))
                results.append(scores)

            x = np.arange(len(k_values))
            width = 0.35

            ax.bar(x - width/2, results[0], width, label='uniform', alpha=0.8)
            ax.bar(x + width/2, results[1], width, label='distance', alpha=0.8)

            ax.set_xlabel('K值')
            ax.set_ylabel(ylabel)
            ax.set_title(title)
            ax.set_xticks(x)
            ax.set_xticklabels(k_values)
            ax.legend()
            ax.grid(True, alpha=0.3)

    def compare_distance_metrics(self):
        """Compare KNN accuracy across distance metrics.

        Sweeps K from 1 to 15 on a synthetic 4-feature classification set
        for euclidean, manhattan, chebyshev and minkowski (p=3) metrics,
        plotting one accuracy curve per metric.
        """
        X, y = make_classification(n_samples=500, n_features=4, random_state=42)
        X_train_scaled, X_test_scaled, y_train, y_test = \
            self._split_and_scale(X, y)

        metrics = ['euclidean', 'manhattan', 'chebyshev', 'minkowski']
        k_values = range(1, 16)

        plt.figure(figsize=(12, 8))

        for metric in metrics:
            scores = []
            for k in k_values:
                # p=3 makes minkowski a distinct L3 metric; with the default
                # p=2 it would just duplicate the euclidean curve.
                if metric == 'minkowski':
                    knn = KNeighborsClassifier(n_neighbors=k, metric=metric, p=3)
                else:
                    knn = KNeighborsClassifier(n_neighbors=k, metric=metric)

                knn.fit(X_train_scaled, y_train)
                pred = knn.predict(X_test_scaled)
                scores.append(accuracy_score(y_test, pred))

            plt.plot(k_values, scores, 'o-', label=metric, markersize=4)

        plt.xlabel('K值')
        plt.ylabel('准确率')
        plt.title('不同距离度量的性能比较')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.show()

    def algorithm_comparison(self):
        """Benchmark KNN against linear, tree and forest baselines.

        Measures the test-set score (accuracy for classifiers, R² for
        regressors) plus wall-clock fit and predict times on the synthetic
        classification and regression sets, then plots score and timing
        comparisons. Returns (cls_results, reg_results, cls_times, reg_times).
        """
        datasets = self.generate_datasets()

        # Classification baselines
        classifiers = {
            'KNN': KNeighborsClassifier(n_neighbors=5),
            'Logistic Regression': LogisticRegression(random_state=42),
            'Decision Tree': DecisionTreeClassifier(random_state=42),
            'Random Forest': RandomForestClassifier(n_estimators=100, random_state=42)
        }

        # Regression baselines
        regressors = {
            'KNN': KNeighborsRegressor(n_neighbors=5),
            'Linear Regression': LinearRegression(),
            'Decision Tree': DecisionTreeRegressor(random_state=42),
            'Random Forest': RandomForestRegressor(n_estimators=100, random_state=42)
        }

        fig, axes = plt.subplots(2, 2, figsize=(15, 12))

        # Classification benchmark
        X_cls, y_cls = datasets['classification']
        X_train_scaled, X_test_scaled, y_train, y_test = \
            self._split_and_scale(X_cls, y_cls)
        cls_results, cls_times = self._benchmark(
            classifiers, accuracy_score,
            X_train_scaled, X_test_scaled, y_train, y_test
        )

        # Regression benchmark
        X_reg, y_reg = datasets['regression']
        X_train_scaled, X_test_scaled, y_train, y_test = \
            self._split_and_scale(X_reg, y_reg)
        reg_results, reg_times = self._benchmark(
            regressors, r2_score,
            X_train_scaled, X_test_scaled, y_train, y_test
        )

        # Score comparison (top row)
        axes[0, 0].bar(cls_results.keys(), cls_results.values(), alpha=0.8)
        axes[0, 0].set_title('分类算法性能对比')
        axes[0, 0].set_ylabel('准确率')
        axes[0, 0].tick_params(axis='x', rotation=45)

        axes[0, 1].bar(reg_results.keys(), reg_results.values(), alpha=0.8)
        axes[0, 1].set_title('回归算法性能对比')
        axes[0, 1].set_ylabel('R² 分数')
        axes[0, 1].tick_params(axis='x', rotation=45)

        # Timing comparison (bottom row). KNN is "lazy": fitting is cheap
        # but prediction is where the neighbor search cost shows up.
        train_times = [cls_times[name]['train'] for name in classifiers.keys()]
        pred_times = [cls_times[name]['predict'] for name in classifiers.keys()]

        x = np.arange(len(classifiers))
        width = 0.35

        axes[1, 0].bar(x - width/2, train_times, width, label='训练时间', alpha=0.8)
        axes[1, 0].bar(x + width/2, pred_times, width, label='预测时间', alpha=0.8)
        axes[1, 0].set_title('分类算法时间对比')
        axes[1, 0].set_ylabel('时间 (秒)')
        axes[1, 0].set_xticks(x)
        axes[1, 0].set_xticklabels(classifiers.keys(), rotation=45)
        axes[1, 0].legend()

        train_times_reg = [reg_times[name]['train'] for name in regressors.keys()]
        pred_times_reg = [reg_times[name]['predict'] for name in regressors.keys()]

        axes[1, 1].bar(x - width/2, train_times_reg, width, label='训练时间', alpha=0.8)
        axes[1, 1].bar(x + width/2, pred_times_reg, width, label='预测时间', alpha=0.8)
        axes[1, 1].set_title('回归算法时间对比')
        axes[1, 1].set_ylabel('时间 (秒)')
        axes[1, 1].set_xticks(x)
        axes[1, 1].set_xticklabels(regressors.keys(), rotation=45)
        axes[1, 1].legend()

        plt.tight_layout()
        plt.show()

        return cls_results, reg_results, cls_times, reg_times

    def learning_curve_analysis(self):
        """Plot the KNN learning curve (accuracy vs training-set size)."""
        X, y = make_classification(n_samples=2000, n_features=10, random_state=42)
        X_scaled = self.scaler.fit_transform(X)

        # Compute the learning curve with 5-fold CV over 10 training sizes.
        # shuffle=True is required for random_state to take effect; without
        # it sklearn ignores the seed and the splits are not shuffled.
        train_sizes, train_scores, val_scores = learning_curve(
            KNeighborsClassifier(n_neighbors=5),
            X_scaled, y, cv=5,
            train_sizes=np.linspace(0.1, 1.0, 10),
            shuffle=True, random_state=42
        )

        # Mean +/- std across the CV folds, drawn as a band around each curve.
        train_mean = np.mean(train_scores, axis=1)
        train_std = np.std(train_scores, axis=1)
        val_mean = np.mean(val_scores, axis=1)
        val_std = np.std(val_scores, axis=1)

        plt.figure(figsize=(10, 6))
        plt.plot(train_sizes, train_mean, 'o-', label='训练集', markersize=4)
        plt.fill_between(train_sizes, train_mean - train_std, train_mean + train_std, alpha=0.1)

        plt.plot(train_sizes, val_mean, 's-', label='验证集', markersize=4)
        plt.fill_between(train_sizes, val_mean - val_std, val_mean + val_std, alpha=0.1)

        plt.xlabel('训练样本数量')
        plt.ylabel('准确率')
        plt.title('KNN学习曲线')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.show()

    def validation_curve_analysis(self):
        """Plot the KNN validation curve (accuracy vs n_neighbors)."""
        X, y = make_classification(n_samples=1000, n_features=5, random_state=42)
        X_scaled = self.scaler.fit_transform(X)

        # 5-fold cross-validated accuracy for K = 1..20.
        param_range = range(1, 21)
        train_scores, val_scores = validation_curve(
            KNeighborsClassifier(), X_scaled, y,
            param_name='n_neighbors', param_range=param_range,
            cv=5, scoring='accuracy'
        )

        # Mean +/- std across the CV folds, drawn as a band around each curve.
        train_mean = np.mean(train_scores, axis=1)
        train_std = np.std(train_scores, axis=1)
        val_mean = np.mean(val_scores, axis=1)
        val_std = np.std(val_scores, axis=1)

        plt.figure(figsize=(10, 6))
        plt.plot(param_range, train_mean, 'o-', label='训练集', markersize=4)
        plt.fill_between(param_range, train_mean - train_std, train_mean + train_std, alpha=0.1)

        plt.plot(param_range, val_mean, 's-', label='验证集', markersize=4)
        plt.fill_between(param_range, val_mean - val_std, val_mean + val_std, alpha=0.1)

        plt.xlabel('K值')
        plt.ylabel('准确率')
        plt.title('KNN验证曲线 (K值)')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.show()

def main():
    """Run the full KNN analysis pipeline and print a summary report.

    Sequence: generate datasets, sweep the K parameter, compare distance
    metrics, benchmark against other algorithms, then learning/validation
    curve analysis, ending with a printed summary. Each visualization step
    blocks on plt.show() until its figure window is closed.
    """
    print("=" * 60)
    print("KNN算法综合分析和对比")
    print("=" * 60)
    
    analyzer = KNNComprehensiveAnalysis()
    
    # 1. Generate the benchmark datasets
    print("\n1. 生成测试数据集...")
    datasets = analyzer.generate_datasets()
    
    # 2. Analyze how the K parameter affects performance
    print("\n2. 分析K参数对性能的影响...")
    analyzer.analyze_k_parameter(datasets)
    
    # 3. Compare distance metrics
    print("\n3. 比较不同距离度量...")
    analyzer.compare_distance_metrics()
    
    # 4. Benchmark KNN against other algorithms
    print("\n4. KNN与其他算法性能对比...")
    cls_results, reg_results, cls_times, reg_times = analyzer.algorithm_comparison()
    
    # 5. Learning curve analysis
    print("\n5. 学习曲线分析...")
    analyzer.learning_curve_analysis()
    
    # 6. Validation curve analysis
    print("\n6. 验证曲线分析...")
    analyzer.validation_curve_analysis()
    
    # 7. Summary report: scores per algorithm, then qualitative notes on
    # KNN strengths/weaknesses and tuning advice (output text is Chinese).
    print("\n7. 性能总结报告")
    print("=" * 40)
    print("分类任务结果:")
    for name, score in cls_results.items():
        print(f"  {name}: {score:.4f}")
    
    print("\n回归任务结果:")
    for name, score in reg_results.items():
        print(f"  {name}: {score:.4f}")
    
    print("\nKNN算法特点总结:")
    print("优点:")
    print("  - 简单易懂，无需训练过程")
    print("  - 适用于多分类和回归问题")
    print("  - 对局部模式敏感")
    print("  - 可以处理非线性关系")
    
    print("缺点:")
    print("  - 计算复杂度高")
    print("  - 对维度灾难敏感")
    print("  - 需要大量存储空间")
    print("  - 对噪声和异常值敏感")
    
    print("\n参数调优建议:")
    print("  - K值: 通常选择奇数，避免平票")
    print("  - 权重: distance权重通常优于uniform")
    print("  - 距离度量: 欧几里得距离最常用")
    print("  - 数据预处理: 标准化很重要")

if __name__ == "__main__":
    main()
