"""
KNN算法API调用参考手册
包含最常用的API调用方式和参数说明
"""

# ============================================================================
# 1. 基本导入
# ============================================================================
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, classification_report, mean_squared_error, r2_score
import numpy as np

# ============================================================================
# 2. KNN分类器 - 基本用法
# ============================================================================

def knn_classification_basic(n_neighbors=5, weights='uniform',
                             algorithm='auto', metric='minkowski', p=2):
    """Build and return an unfitted KNN classifier.

    Generalized from the original hard-coded example: every constructor
    argument is now a keyword parameter whose default matches the original
    (and sklearn's own) defaults, so existing no-argument calls behave
    identically.

    Parameters
    ----------
    n_neighbors : int, default 5
        K, the number of nearest neighbors consulted.
    weights : str, default 'uniform'
        'uniform' (equal votes) or 'distance' (inverse-distance votes).
    algorithm : str, default 'auto'
        Neighbor-search backend: 'auto', 'ball_tree', 'kd_tree', or 'brute'.
    metric : str, default 'minkowski'
        Distance metric, e.g. 'euclidean', 'manhattan', 'minkowski'.
    p : int, default 2
        Minkowski power parameter; p=2 is Euclidean distance.

    Returns
    -------
    KNeighborsClassifier
        An unfitted estimator; call .fit(X, y), then .predict / .predict_proba.
    """
    return KNeighborsClassifier(
        n_neighbors=n_neighbors,
        weights=weights,
        algorithm=algorithm,
        metric=metric,
        p=p,
    )

# ============================================================================
# 3. KNN回归器 - 基本用法
# ============================================================================

def knn_regression_basic(n_neighbors=5, weights='uniform',
                         algorithm='auto', metric='minkowski', p=2):
    """Build and return an unfitted KNN regressor.

    Generalized from the original hard-coded example: every constructor
    argument is now a keyword parameter whose default matches the original
    values, so existing no-argument calls behave identically. Mirrors the
    signature of knn_classification_basic for consistency.

    Parameters
    ----------
    n_neighbors : int, default 5
        K, the number of nearest neighbors averaged.
    weights : str, default 'uniform'
        'uniform' or 'distance' weighting of neighbor targets.
    algorithm : str, default 'auto'
        Neighbor-search backend: 'auto', 'ball_tree', 'kd_tree', or 'brute'.
    metric : str, default 'minkowski'
        Distance metric.
    p : int, default 2
        Minkowski power parameter; p=2 is Euclidean distance.

    Returns
    -------
    KNeighborsRegressor
        An unfitted estimator; call .fit(X, y), then .predict.
    """
    return KNeighborsRegressor(
        n_neighbors=n_neighbors,
        weights=weights,
        algorithm=algorithm,
        metric=metric,
        p=p,
    )

# ============================================================================
# 4. 完整的分类示例
# ============================================================================

def complete_classification_example():
    """End-to-end KNN classification demo on the iris dataset.

    Steps: load data, stratified train/test split, standardize features,
    fit a distance-weighted 5-NN classifier, predict, and print metrics.

    Returns
    -------
    (KNeighborsClassifier, float)
        The fitted model and its test-set accuracy.
    """
    from sklearn.datasets import load_iris

    # Load the bundled iris data.
    iris = load_iris()
    features, labels = iris.data, iris.target

    # Hold out 30% for testing; stratify keeps class proportions balanced.
    X_tr, X_te, y_tr, y_te = train_test_split(
        features, labels, test_size=0.3, random_state=42, stratify=labels
    )

    # KNN is distance-based, so all features must share one scale.
    scaler = StandardScaler().fit(X_tr)
    X_tr = scaler.transform(X_tr)
    X_te = scaler.transform(X_te)

    # Distance weighting lets closer neighbors vote more heavily.
    model = KNeighborsClassifier(n_neighbors=5, weights='distance')
    model.fit(X_tr, y_tr)

    # Hard predictions plus class probabilities (shown for API reference).
    predicted = model.predict(X_te)
    model.predict_proba(X_te)

    # Evaluate and report.
    acc = accuracy_score(y_te, predicted)
    print(f"准确率: {acc:.4f}")
    print("分类报告:")
    print(classification_report(y_te, predicted))

    return model, acc

# ============================================================================
# 5. 完整的回归示例
# ============================================================================

def complete_regression_example():
    """End-to-end KNN regression demo on synthetic data.

    Steps: generate a noisy regression problem, split, standardize,
    fit a distance-weighted 5-NN regressor, and print MSE and R².

    Returns
    -------
    (KNeighborsRegressor, float)
        The fitted model and its test-set R² score.
    """
    from sklearn.datasets import make_regression

    # Synthetic dataset: 1000 samples, 5 features, Gaussian noise.
    features, targets = make_regression(
        n_samples=1000, n_features=5, noise=10, random_state=42
    )

    # 70/30 train/test split.
    X_tr, X_te, y_tr, y_te = train_test_split(
        features, targets, test_size=0.3, random_state=42
    )

    # Standardize so no feature dominates the distance computation.
    scaler = StandardScaler().fit(X_tr)
    X_tr = scaler.transform(X_tr)
    X_te = scaler.transform(X_te)

    # Fit and predict with inverse-distance neighbor weighting.
    model = KNeighborsRegressor(n_neighbors=5, weights='distance')
    model.fit(X_tr, y_tr)
    predicted = model.predict(X_te)

    # Report regression metrics.
    mse = mean_squared_error(y_te, predicted)
    r2 = r2_score(y_te, predicted)
    print(f"MSE: {mse:.4f}")
    print(f"R²: {r2:.4f}")

    return model, r2

# ============================================================================
# 6. 超参数调优
# ============================================================================

def hyperparameter_tuning_example():
    """Grid-search KNN hyperparameters with 5-fold cross-validation.

    Searches over K, the weighting scheme, and the distance metric on the
    iris dataset, then evaluates the best estimator on a held-out test set.

    Returns
    -------
    (KNeighborsClassifier, dict)
        The refit best estimator and the winning parameter dict.
    """
    from sklearn.datasets import load_iris

    # Data: iris features and labels.
    iris = load_iris()
    features, labels = iris.data, iris.target

    # Split, then standardize using statistics from the training fold only.
    X_tr, X_te, y_tr, y_te = train_test_split(
        features, labels, test_size=0.3, random_state=42
    )
    scaler = StandardScaler().fit(X_tr)
    X_tr = scaler.transform(X_tr)
    X_te = scaler.transform(X_te)

    # Candidate hyperparameter values.
    search_space = {
        'n_neighbors': [3, 5, 7, 9, 11],
        'weights': ['uniform', 'distance'],
        'metric': ['euclidean', 'manhattan', 'minkowski'],
    }

    # Exhaustive search: 5-fold CV, accuracy scoring, all CPU cores.
    searcher = GridSearchCV(
        KNeighborsClassifier(),
        search_space,
        cv=5,
        scoring='accuracy',
        n_jobs=-1,
    )
    searcher.fit(X_tr, y_tr)

    # Best configuration found during cross-validation.
    winning_params = searcher.best_params_
    print(f"最佳参数: {winning_params}")
    print(f"最佳交叉验证分数: {searcher.best_score_:.4f}")

    # Final check on the untouched test set.
    champion = searcher.best_estimator_
    test_acc = accuracy_score(y_te, champion.predict(X_te))
    print(f"测试集准确率: {test_acc:.4f}")

    return champion, winning_params

# ============================================================================
# 7. 使用Pipeline
# ============================================================================

def pipeline_example():
    """Chain scaling and KNN in a sklearn Pipeline, then grid-search it.

    A Pipeline prevents test-set leakage: the scaler is refit on each CV
    training fold automatically. Step parameters are addressed with the
    'stepname__param' syntax during the grid search.

    Returns
    -------
    (Pipeline, Pipeline)
        The plainly-fitted pipeline and the grid-search best estimator.
    """
    from sklearn.pipeline import Pipeline
    from sklearn.datasets import load_iris

    # Data and split.
    iris = load_iris()
    X_tr, X_te, y_tr, y_te = train_test_split(
        iris.data, iris.target, test_size=0.3, random_state=42
    )

    # Two steps: standardization followed by a 5-NN classifier.
    pipe = Pipeline([
        ('scaler', StandardScaler()),
        ('knn', KNeighborsClassifier(n_neighbors=5)),
    ])

    # Fit and evaluate the pipeline as a single estimator.
    pipe.fit(X_tr, y_tr)
    test_acc = accuracy_score(y_te, pipe.predict(X_te))
    print(f"Pipeline准确率: {test_acc:.4f}")

    # Tune the embedded KNN via 'knn__<param>' keys.
    grid = {
        'knn__n_neighbors': [3, 5, 7, 9],
        'knn__weights': ['uniform', 'distance'],
    }
    grid_search = GridSearchCV(pipe, grid, cv=5)
    grid_search.fit(X_tr, y_tr)
    print(f"Pipeline最佳参数: {grid_search.best_params_}")

    return pipe, grid_search.best_estimator_

# ============================================================================
# 8. 常用参数说明
# ============================================================================

"""
KNN主要参数详解:

1. n_neighbors (int, default=5)
   - K值，即最近邻的数量
   - 较小的K值：模型复杂度高，容易过拟合
   - 较大的K值：模型复杂度低，可能欠拟合
   - 建议：从3-15之间选择奇数值

2. weights (str or callable, default='uniform')
   - 'uniform': 所有邻居权重相等
   - 'distance': 权重与距离成反比
   - 建议：通常'distance'效果更好

3. algorithm (str, default='auto')
   - 'auto': 自动选择最合适的算法
   - 'ball_tree': 球树算法，适用于高维数据
   - 'kd_tree': KD树算法，适用于低维数据
   - 'brute': 暴力搜索，适用于小数据集

4. metric (str, default='minkowski')
   - 'euclidean': 欧几里得距离
   - 'manhattan': 曼哈顿距离
   - 'minkowski': 闵可夫斯基距离
   - 'chebyshev': 切比雪夫距离

5. p (int, default=2)
   - 闵可夫斯基距离的参数
   - p=1: 曼哈顿距离
   - p=2: 欧几里得距离

6. n_jobs (int, default=None)
   - 并行计算的CPU核心数
   - -1: 使用所有可用核心
"""

# ============================================================================
# 9. 最佳实践
# ============================================================================

def best_practices():
    """Print a checklist of KNN best practices (output unchanged).

    The tips are kept in a tuple and printed in a loop; each entry is the
    exact line the original emitted, including embedded leading newlines
    that separate the numbered sections.
    """
    tips = (
        "KNN算法最佳实践:",
        "1. 数据预处理:",
        "   - 必须进行特征标准化/归一化",
        "   - 处理缺失值",
        "   - 移除或处理异常值",
        "\n2. 参数选择:",
        "   - K值选择奇数，避免平票",
        "   - 使用交叉验证选择最佳K值",
        "   - 优先考虑'distance'权重",
        "\n3. 性能优化:",
        "   - 对于大数据集，考虑降维",
        "   - 使用合适的距离度量",
        "   - 考虑使用近似最近邻算法",
        "\n4. 适用场景:",
        "   - 小到中等规模数据集",
        "   - 非线性分类/回归问题",
        "   - 局部模式重要的问题",
    )
    for line in tips:
        print(line)

# ============================================================================
# 10. 快速参考
# ============================================================================

def quick_reference():
    """Quick-reference snippets for KNN (no-op at runtime).

    The code samples below are kept as inert string literals so this
    function can be read as a cheat sheet; calling it does nothing.
    """

    # Minimal KNN classification
    """
    from sklearn.neighbors import KNeighborsClassifier
    knn = KNeighborsClassifier(n_neighbors=5)
    knn.fit(X_train, y_train)
    predictions = knn.predict(X_test)
    """

    # Minimal KNN regression
    """
    from sklearn.neighbors import KNeighborsRegressor
    knn = KNeighborsRegressor(n_neighbors=5)
    knn.fit(X_train, y_train)
    predictions = knn.predict(X_test)
    """

    # Full workflow with standardization
    """
    from sklearn.preprocessing import StandardScaler
    from sklearn.neighbors import KNeighborsClassifier

    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    knn = KNeighborsClassifier(n_neighbors=5, weights='distance')
    knn.fit(X_train_scaled, y_train)
    predictions = knn.predict(X_test_scaled)
    """

if __name__ == "__main__":
    # Banner, then run each demo under its numbered heading.
    print("KNN算法API参考手册")
    print("=" * 50)

    demos = (
        ("\n1. 分类示例:", complete_classification_example),
        ("\n2. 回归示例:", complete_regression_example),
        ("\n3. 超参数调优:", hyperparameter_tuning_example),
        ("\n4. 最佳实践:", best_practices),
    )
    for heading, demo in demos:
        print(heading)
        demo()
