import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from config.matplotlib_config import apply_matplotlib_cn


# Apply the project's Chinese-font matplotlib configuration so CJK axis
# labels/titles render correctly in the plots below.
apply_matplotlib_cn()

# NOTE: st.set_page_config must be the first Streamlit command on the page.
st.set_page_config(page_title="KNN算法", page_icon="🔷")

# Auth gate: the home page sets session_state['logged_in'] on successful login.
# st.stop() halts script execution here for unauthenticated visitors, so
# nothing below renders.
if not st.session_state.get('logged_in', False):
    st.warning("请先在首页登录以访问此内容")
    st.stop()

st.title("KNN算法 (K-Nearest Neighbors)")

# Page layout: theory explanation, static code sample, interactive demo.
tab1, tab2, tab3 = st.tabs(["原理讲解", "代码示例", "试一试"])

# Tab 1: static theory write-up (KNN principle, distance metrics as LaTeX,
# pros/cons). Content is a single markdown string rendered by Streamlit.
with tab1:
    st.header("KNN算法原理")
    st.markdown("""
        K最近邻(K-Nearest Neighbors)是一种基本的分类与回归方法。

        ### 工作原理
        1. 计算测试数据与各个训练数据之间的距离
        2. 按照距离的递增关系进行排序
        3. 选取距离最小的K个点
        4. 确定前K个点所在类别的出现频率
        5. 返回前K个点中出现频率最高的类别作为测试数据的预测分类

        ### 距离度量
        常用的距离度量方法包括：
        - 欧氏距离：$d(x,y) = \\sqrt{\\sum_{i=1}^{n}(x_i - y_i)^2}$
        - 曼哈顿距离：$d(x,y) = \\sum_{i=1}^{n}|x_i - y_i|$
        - 闵可夫斯基距离：$d(x,y) = (\\sum_{i=1}^{n}|x_i - y_i|^p)^{1/p}$

        ### 优缺点
        **优点**：
        - 简单易懂，易于实现
        - 无需训练过程，适合增量学习
        - 对异常值不敏感

        **缺点**：
        - 计算复杂度高，需要计算与所有样本的距离
        - 内存消耗大，需要存储所有训练数据
        - 对不均衡数据集比较敏感
        """)

# Tab 2: read-only, syntax-highlighted sample showing the standard
# sklearn KNN workflow on the iris dataset. The string is display-only
# and is never executed by this page.
with tab2:
    st.header("KNN算法代码示例")
    st.code(
        """
from sklearn.neighbors import KNeighborsClassifier
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# 加载数据
iris = load_iris()
X, y = iris.data, iris.target

# 划分训练集和测试集
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 创建KNN分类器
knn = KNeighborsClassifier(n_neighbors=3)

# 训练模型
knn.fit(X_train, y_train)

# 预测
y_pred = knn.predict(X_test)

# 评估模型
accuracy = accuracy_score(y_test, y_pred)
print(f"准确率: {accuracy:.2f}")
        """,
        language="python",
    )

# Tab 3: interactive demo — user picks K and the weighting scheme, the page
# trains a KNN classifier on a synthetic 2-D dataset and plots its decision
# boundary alongside the test accuracy.
with tab3:
    st.header("KNN算法演示")

    # Synthetic two-feature, two-class dataset so the decision boundary can be
    # drawn directly in the feature plane. Fixed random_state keeps the data
    # identical across Streamlit reruns.
    X, y = make_classification(
        n_samples=200,
        n_features=2,
        n_redundant=0,
        n_informative=2,
        n_clusters_per_class=1,
        random_state=42,
    )
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42
    )

    col1, col2 = st.columns(2)

    with col1:
        # Hyperparameter widgets; any change triggers a full script rerun.
        k_value = st.slider("选择K值", 1, 15, 5)
        weights = st.selectbox("权重", ["uniform", "distance"])

        # Fit on the training split, report accuracy on the held-out split.
        knn = KNeighborsClassifier(n_neighbors=k_value, weights=weights)
        knn.fit(X_train, y_train)
        y_pred = knn.predict(X_test)
        accuracy = accuracy_score(y_test, y_pred)
        st.metric("准确率", f"{accuracy:.4f}")

    with col2:
        # Classify a dense mesh over the feature plane to visualize the
        # decision regions; h is the mesh step size.
        h = 0.02
        x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
        y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
        xx, yy = np.meshgrid(
            np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)
        )
        Z = knn.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)

        fig, ax = plt.subplots(figsize=(8, 6))
        # Draw via the Axes object rather than the implicit pyplot "current
        # figure" state, so the plot is unambiguously tied to `fig`.
        ax.contourf(xx, yy, Z, alpha=0.4, cmap=plt.cm.RdYlBu)
        ax.scatter(
            X[:, 0], X[:, 1], c=y, s=20, edgecolor='k', cmap=plt.cm.RdYlBu
        )
        ax.set_title(f"KNN决策边界 (K={k_value})")
        ax.set_xlabel("特征1")
        ax.set_ylabel("特征2")
        st.pyplot(fig)
        # Streamlit reruns this script on every widget interaction; without
        # closing, figures accumulate in matplotlib's global registry and leak
        # memory over the session.
        plt.close(fig)