"""
KNN回归任务完整示例
使用波士顿房价和合成数据集演示KNN回归算法的应用
"""

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_regression, fetch_california_housing
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.pipeline import Pipeline
import warnings
warnings.filterwarnings('ignore')  # NOTE(review): silences ALL warnings globally — consider narrowing

# Configure matplotlib with a CJK-capable font (SimHei) so the Chinese
# labels in the plots render; keep the minus sign renderable with that font.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class KNNRegressionDemo:
    """End-to-end demonstration of K-nearest-neighbors regression.

    Bundles data loading, train/test splitting with standardization,
    model training, evaluation, cross-validation, grid-search tuning,
    and several visualizations around ``KNeighborsRegressor``.
    """

    def __init__(self):
        # Fitted estimator; set by train_model() or hyperparameter_tuning().
        self.model = None
        # Scaler fitted on the training split in prepare_data(); reused
        # to transform the test split so no test statistics leak in.
        self.scaler = StandardScaler()
        self.X_train = None
        self.X_test = None
        self.y_train = None
        self.y_test = None

    def load_data(self, dataset_type='california'):
        """Load one of the supported datasets.

        Parameters
        ----------
        dataset_type : {'california', 'synthetic', 'synthetic_1d'}
            Which dataset to load.

        Returns
        -------
        tuple
            ``(X, y, feature_names)``.

        Raises
        ------
        ValueError
            If ``dataset_type`` is not one of the supported names.
        """
        if dataset_type == 'california':
            # California housing benchmark (8 features, ~20k samples).
            data = fetch_california_housing()
            X, y = data.data, data.target
            feature_names = data.feature_names
            print("使用加州房价数据集")
            print(f"特征数量: {X.shape[1]}")
            print(f"样本数量: {X.shape[0]}")
            print(f"特征名称: {feature_names}")
            print(f"目标变量范围: {y.min():.2f} - {y.max():.2f}")

        elif dataset_type == 'synthetic':
            # Synthetic 2-feature regression problem with Gaussian noise.
            X, y = make_regression(
                n_samples=1000,
                n_features=2,
                noise=10,
                random_state=42
            )
            feature_names = ['Feature 1', 'Feature 2']
            print("使用合成回归数据集")
            print(f"特征数量: {X.shape[1]}")
            print(f"样本数量: {X.shape[0]}")
            print(f"目标变量范围: {y.min():.2f} - {y.max():.2f}")

        elif dataset_type == 'synthetic_1d':
            # 1-D noisy sine wave, intended for fit visualization.
            np.random.seed(42)
            X = np.linspace(0, 10, 100).reshape(-1, 1)
            y = np.sin(X.ravel()) + np.random.normal(0, 0.1, X.shape[0])
            feature_names = ['X']
            print("使用1维合成数据集（用于可视化）")
            print(f"特征数量: {X.shape[1]}")
            print(f"样本数量: {X.shape[0]}")

        else:
            # Fail fast with a clear message instead of falling through
            # and raising a confusing UnboundLocalError at the return.
            raise ValueError(f"unknown dataset_type: {dataset_type!r}")

        return X, y, feature_names

    def prepare_data(self, X, y, test_size=0.3, random_state=42):
        """Split into train/test sets and standardize the features.

        The scaler is fitted on the training split only and then applied
        to the test split, avoiding information leakage.
        """
        self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
            X, y, test_size=test_size, random_state=random_state
        )

        # Standardization matters for KNN: distances are scale-sensitive.
        self.X_train_scaled = self.scaler.fit_transform(self.X_train)
        self.X_test_scaled = self.scaler.transform(self.X_test)

        print(f"训练集大小: {self.X_train.shape}")
        print(f"测试集大小: {self.X_test.shape}")
        print(f"训练集目标变量范围: {self.y_train.min():.2f} - {self.y_train.max():.2f}")

    def train_model(self, k=5, weights='uniform', metric='minkowski', p=2):
        """Fit a KNeighborsRegressor on the scaled training data.

        Parameters
        ----------
        k : int
            Number of neighbors.
        weights : str
            'uniform' or 'distance' neighbor weighting.
        metric : str
            Distance metric name.
        p : int
            Minkowski power parameter (p=2 is Euclidean).
        """
        self.model = KNeighborsRegressor(
            n_neighbors=k,
            weights=weights,
            metric=metric,
            p=p
        )

        self.model.fit(self.X_train_scaled, self.y_train)
        print(f"KNN回归模型训练完成 (k={k}, weights={weights}, metric={metric})")

    def evaluate_model(self):
        """Evaluate the current model on both splits.

        Returns
        -------
        dict
            MSE/MAE/R² for train and test, plus both prediction arrays.
        """
        y_pred_train = self.model.predict(self.X_train_scaled)
        y_pred_test = self.model.predict(self.X_test_scaled)

        train_mse = mean_squared_error(self.y_train, y_pred_train)
        test_mse = mean_squared_error(self.y_test, y_pred_test)
        train_mae = mean_absolute_error(self.y_train, y_pred_train)
        test_mae = mean_absolute_error(self.y_test, y_pred_test)
        train_r2 = r2_score(self.y_train, y_pred_train)
        test_r2 = r2_score(self.y_test, y_pred_test)

        print("模型评估结果:")
        print(f"训练集 - MSE: {train_mse:.4f}, MAE: {train_mae:.4f}, R²: {train_r2:.4f}")
        print(f"测试集 - MSE: {test_mse:.4f}, MAE: {test_mae:.4f}, R²: {test_r2:.4f}")

        return {
            'train_mse': train_mse, 'test_mse': test_mse,
            'train_mae': train_mae, 'test_mae': test_mae,
            'train_r2': train_r2, 'test_r2': test_r2,
            'y_pred_train': y_pred_train, 'y_pred_test': y_pred_test
        }

    def cross_validation(self, cv=5, scoring='neg_mean_squared_error'):
        """Run k-fold cross-validation on the training split.

        Returns the per-fold scores; MSE scores are sign-flipped to
        positive values (sklearn reports negated MSE for maximization).
        """
        scores = cross_val_score(self.model, self.X_train_scaled, self.y_train,
                                 cv=cv, scoring=scoring)

        if scoring == 'neg_mean_squared_error':
            # sklearn returns negated MSE; flip to the conventional sign.
            scores = -scores
            print(f"\n{cv}折交叉验证结果 (MSE):")
            print(f"平均MSE: {scores.mean():.4f} (+/- {scores.std() * 2:.4f})")
        else:
            print(f"\n{cv}折交叉验证结果 ({scoring}):")
            print(f"平均分数: {scores.mean():.4f} (+/- {scores.std() * 2:.4f})")

        return scores

    def hyperparameter_tuning(self):
        """Grid-search n_neighbors/weights/metric; adopt the best model.

        Returns
        -------
        tuple
            ``(best_params, best_mse)`` where best_mse is positive.
        """
        param_grid = {
            'n_neighbors': range(1, 21),
            'weights': ['uniform', 'distance'],
            'metric': ['euclidean', 'manhattan', 'minkowski']
        }

        grid_search = GridSearchCV(
            KNeighborsRegressor(),
            param_grid,
            cv=5,
            scoring='neg_mean_squared_error',
            n_jobs=-1
        )

        grid_search.fit(self.X_train_scaled, self.y_train)

        print("\n超参数调优结果:")
        print(f"最佳参数: {grid_search.best_params_}")
        print(f"最佳交叉验证分数 (MSE): {-grid_search.best_score_:.4f}")

        # Replace the current model with the refit best estimator.
        self.model = grid_search.best_estimator_

        return grid_search.best_params_, -grid_search.best_score_

    def plot_predictions(self, results):
        """Plot predicted-vs-true scatter for both splits plus residuals.

        Parameters
        ----------
        results : dict
            Output of :meth:`evaluate_model`.
        """
        plt.figure(figsize=(15, 5))

        # Train split: predictions vs ground truth with the y=x line.
        plt.subplot(1, 3, 1)
        plt.scatter(self.y_train, results['y_pred_train'], alpha=0.6)
        plt.plot([self.y_train.min(), self.y_train.max()],
                 [self.y_train.min(), self.y_train.max()], 'r--', lw=2)
        plt.xlabel('真实值')
        plt.ylabel('预测值')
        plt.title(f'训练集预测对比\nR² = {results["train_r2"]:.4f}')
        plt.grid(True, alpha=0.3)

        # Test split: same comparison.
        plt.subplot(1, 3, 2)
        plt.scatter(self.y_test, results['y_pred_test'], alpha=0.6)
        plt.plot([self.y_test.min(), self.y_test.max()],
                 [self.y_test.min(), self.y_test.max()], 'r--', lw=2)
        plt.xlabel('真实值')
        plt.ylabel('预测值')
        plt.title(f'测试集预测对比\nR² = {results["test_r2"]:.4f}')
        plt.grid(True, alpha=0.3)

        # Residuals vs predicted value on the test split.
        plt.subplot(1, 3, 3)
        residuals = self.y_test - results['y_pred_test']
        plt.scatter(results['y_pred_test'], residuals, alpha=0.6)
        plt.axhline(y=0, color='r', linestyle='--')
        plt.xlabel('预测值')
        plt.ylabel('残差')
        plt.title('残差图')
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

    def plot_k_comparison(self):
        """Plot train/test R² for k = 1..20 to visualize over/underfitting."""
        k_values = range(1, 21)
        train_scores = []
        test_scores = []

        for k in k_values:
            knn = KNeighborsRegressor(n_neighbors=k)
            knn.fit(self.X_train_scaled, self.y_train)

            train_pred = knn.predict(self.X_train_scaled)
            test_pred = knn.predict(self.X_test_scaled)

            train_scores.append(r2_score(self.y_train, train_pred))
            test_scores.append(r2_score(self.y_test, test_pred))

        plt.figure(figsize=(10, 6))
        plt.plot(k_values, train_scores, 'bo-', label='训练集', markersize=4)
        plt.plot(k_values, test_scores, 'ro-', label='测试集', markersize=4)
        plt.xlabel('K值')
        plt.ylabel('R² 分数')
        plt.title('不同K值的性能比较')
        plt.legend()
        plt.grid(True, alpha=0.3)
        plt.show()

    def plot_1d_regression(self, X, y):
        """Visualize KNN fits on 1-D data for k in {1, 5, 15}.

        Parameters
        ----------
        X : ndarray of shape (n_samples, 1)
            1-D feature column (anything else is rejected).
        y : ndarray
            Target values.
        """
        if X.shape[1] != 1:
            print("1维回归可视化仅适用于1维数据")
            return

        # BUG FIX: use a local scaler here. The original refit
        # self.scaler, silently invalidating the scaler fitted on the
        # main training data in prepare_data().
        local_scaler = StandardScaler()
        X_scaled = local_scaler.fit_transform(X)

        # Dense grid over the scaled range for a smooth prediction curve.
        X_test_range = np.linspace(X_scaled.min(), X_scaled.max(), 300).reshape(-1, 1)

        plt.figure(figsize=(15, 5))

        # One panel per k to contrast jagged (k=1) vs smooth (k=15) fits.
        k_values = [1, 5, 15]
        for i, k in enumerate(k_values):
            plt.subplot(1, 3, i + 1)

            knn = KNeighborsRegressor(n_neighbors=k)
            knn.fit(X_scaled, y)

            y_pred = knn.predict(X_test_range)

            plt.scatter(X_scaled, y, alpha=0.6, label='训练数据')
            plt.plot(X_test_range, y_pred, 'r-', linewidth=2, label=f'KNN (k={k})')
            plt.xlabel('X (标准化)')
            plt.ylabel('y')
            plt.title(f'KNN回归 (k={k})')
            plt.legend()
            plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

def main():
    """Run the complete KNN regression walkthrough, step by step."""
    banner = "=" * 60
    print(banner)
    print("KNN回归任务完整示例")
    print(banner)

    demo = KNNRegressionDemo()

    # Step 1: fetch the California housing data.
    print("\n1. 加载数据集")
    X, y, feature_names = demo.load_data('california')

    # Step 2: split and standardize.
    print("\n2. 数据预处理")
    demo.prepare_data(X, y)

    # Step 3: fit a baseline model with k=5.
    print("\n3. 训练KNN回归模型")
    demo.train_model(k=5)

    # Step 4: baseline metrics on both splits.
    print("\n4. 模型评估")
    results = demo.evaluate_model()

    # Step 5: cross-validated MSE on the training split.
    print("\n5. 交叉验证")
    cv_scores = demo.cross_validation()

    # Step 6: grid-search the hyper-parameters.
    print("\n6. 超参数调优")
    best_params, best_score = demo.hyperparameter_tuning()

    # Step 7: metrics with the tuned estimator.
    print("\n7. 使用最佳参数的最终评估")
    final_results = demo.evaluate_model()

    # Step 8: scatter/residual plots and the k sweep.
    print("\n8. 结果可视化")
    demo.plot_predictions(final_results)
    demo.plot_k_comparison()

    # Step 9: 1-D fit visualization on a noisy sine wave.
    print("\n9. 1维回归可视化")
    X_1d, y_1d, _ = demo.load_data('synthetic_1d')
    demo.plot_1d_regression(X_1d, y_1d)

    # Step 10: printed API snippets for quick reference.
    print("\n10. API调用示例")
    print("=" * 40)

    # Basic usage example (emitted as one multiline block; output is
    # identical to printing each line separately).
    print(
        "基本KNN回归器用法:\n"
        "```python\n"
        "from sklearn.neighbors import KNeighborsRegressor\n"
        "from sklearn.preprocessing import StandardScaler\n"
        "from sklearn.metrics import mean_squared_error, r2_score\n"
        "\n"
        "# 创建并训练模型\n"
        "scaler = StandardScaler()\n"
        "X_scaled = scaler.fit_transform(X_train)\n"
        "knn = KNeighborsRegressor(n_neighbors=5, weights='distance')\n"
        "knn.fit(X_scaled, y_train)\n"
        "\n"
        "# 预测\n"
        "X_test_scaled = scaler.transform(X_test)\n"
        "predictions = knn.predict(X_test_scaled)\n"
        "\n"
        "# 评估\n"
        "mse = mean_squared_error(y_test, predictions)\n"
        "r2 = r2_score(y_test, predictions)\n"
        "```"
    )

    # Advanced usage example: Pipeline + GridSearchCV.
    print(
        "\n高级用法 - 使用Pipeline:\n"
        "```python\n"
        "from sklearn.pipeline import Pipeline\n"
        "from sklearn.model_selection import GridSearchCV\n"
        "\n"
        "# 创建Pipeline\n"
        "pipe = Pipeline([\n"
        "    ('scaler', StandardScaler()),\n"
        "    ('knn', KNeighborsRegressor())\n"
        "])\n"
        "\n"
        "# 参数网格\n"
        "param_grid = {\n"
        "    'knn__n_neighbors': [3, 5, 7, 9],\n"
        "    'knn__weights': ['uniform', 'distance']\n"
        "}\n"
        "\n"
        "# 网格搜索\n"
        "grid_search = GridSearchCV(pipe, param_grid, cv=5)\n"
        "grid_search.fit(X_train, y_train)\n"
        "```"
    )

    # Final summary of the tuned model's performance.
    print(f"\n最终模型性能总结:")
    print(f"- 最佳参数: {best_params}")
    print(f"- 最佳MSE: {best_score:.4f}")
    print(f"- 测试集R²: {final_results['test_r2']:.4f}")
    print(f"- 测试集MAE: {final_results['test_mae']:.4f}")

def compare_knn_variants():
    """Train several KNN configurations on one synthetic dataset and
    print each configuration's test-set MSE and R².

    Returns
    -------
    list[dict]
        One entry per configuration with keys 'config', 'mse', 'r2'.
    """
    print("\n" + "=" * 60)
    print("KNN回归变体比较")
    print("=" * 60)

    # Fixed synthetic regression problem so runs are reproducible.
    features, targets = make_regression(n_samples=500, n_features=5, noise=10, random_state=42)
    feats_train, feats_test, targ_train, targ_test = train_test_split(
        features, targets, test_size=0.3, random_state=42
    )

    # Standardize with statistics from the training split only.
    scaler = StandardScaler()
    feats_train_std = scaler.fit_transform(feats_train)
    feats_test_std = scaler.transform(feats_test)

    # Configurations to compare: weighting schemes and distance metrics.
    configs = (
        {'n_neighbors': 5, 'weights': 'uniform', 'metric': 'euclidean'},
        {'n_neighbors': 5, 'weights': 'distance', 'metric': 'euclidean'},
        {'n_neighbors': 3, 'weights': 'uniform', 'metric': 'manhattan'},
        {'n_neighbors': 7, 'weights': 'distance', 'metric': 'minkowski', 'p': 3},
    )

    outcomes = []
    for cfg in configs:
        estimator = KNeighborsRegressor(**cfg)
        estimator.fit(feats_train_std, targ_train)
        predictions = estimator.predict(feats_test_std)

        mse = mean_squared_error(targ_test, predictions)
        r2 = r2_score(targ_test, predictions)
        outcomes.append({'config': str(cfg), 'mse': mse, 'r2': r2})

        print(f"配置: {cfg}")
        print(f"MSE: {mse:.4f}, R²: {r2:.4f}")
        print("-" * 40)

    return outcomes

if __name__ == "__main__":
    main()
    compare_knn_variants()
