import streamlit as st
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
from config.matplotlib_config import apply_matplotlib_cn


# Configure matplotlib for CJK rendering (presumably sets a Chinese-capable
# font so axis labels/titles below display correctly — confirm in config module).
apply_matplotlib_cn()

# NOTE: st.set_page_config must be the first Streamlit command on the page;
# do not move any st.* call above it.
st.set_page_config(page_title="朴素贝叶斯", page_icon="📦")

# Login gate: the home page is expected to set st.session_state['logged_in'].
# st.stop() halts script execution here for unauthenticated visitors.
if not st.session_state.get('logged_in', False):
    st.warning("请先在首页登录以访问此内容")
    st.stop()

st.title("朴素贝叶斯算法")

# Page layout: theory / code example / interactive demo.
tab1, tab2, tab3 = st.tabs(["原理讲解", "代码示例", "试一试"])

# Tab 1: static theory write-up (Bayes' theorem, the naive independence
# assumption, pros/cons). Pure markdown, no interactive widgets.
with tab1:
    st.header("朴素贝叶斯算法原理")
    st.markdown("""
        朴素贝叶斯是一种基于贝叶斯定理和特征条件独立假设的分类方法。

        ### 贝叶斯定理
        $P(y|X) = \\frac{P(X|y)P(y)}{P(X)}$

        其中：
        - $P(y|X)$ 是后验概率：给定特征X下，类别y的概率
        - $P(X|y)$ 是似然概率：给定类别y下，特征X的概率
        - $P(y)$ 是先验概率：类别y的概率
        - $P(X)$ 是证据：特征X的概率

        ### 朴素假设
        "朴素"指的是假设所有特征之间相互独立：
        $P(X|y) = P(x_1|y) \\times P(x_2|y) \\times ... \\times P(x_n|y)$

        ### 优缺点
        **优点**：
        - 算法简单，训练速度快
        - 对小规模数据表现良好
        - 适合多分类任务
        - 对缺失数据不敏感

        **缺点**：
        - 特征独立性假设在现实中往往不成立
        - 需要知道先验概率
        - 对输入数据的表达形式敏感
        """)

# Tab 2: a read-only, copy-pasteable sklearn GaussianNB example rendered with
# syntax highlighting. The string is displayed, not executed.
with tab2:
    st.header("朴素贝叶斯代码示例")
    st.code(
        """
from sklearn.naive_bayes import GaussianNB
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, classification_report

# 加载数据
iris = load_iris()
X, y = iris.data, iris.target

# 划分训练集和测试集
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# 创建朴素贝叶斯模型
model = GaussianNB()

# 训练模型
model.fit(X_train, y_train)

# 预测
y_pred = model.predict(X_test)

# 评估模型
accuracy = accuracy_score(y_test, y_pred)
print(f"准确率: {accuracy:.2f}")
print("分类报告:")
print(classification_report(y_test, y_pred))
        """,
        language="python",
    )

# Tab 3: interactive demo. Left column: tune var_smoothing, inspect one test
# sample's predicted class probabilities. Right column: 2-D decision boundary
# over two user-selected features.
with tab3:
    st.header("朴素贝叶斯演示")

    # Load iris fresh on each rerun (cheap, ships with sklearn).
    iris = load_iris()
    X, y = iris.data, iris.target
    feature_names = iris.feature_names
    target_names = iris.target_names

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42
    )

    col1, col2 = st.columns(2)

    with col1:
        # Expose var_smoothing on a log10 scale; GaussianNB takes the raw value.
        var_smoothing_exp = st.slider(
            "平滑参数 (指数)", -12, -6, -9, format="10^%d", help="var_smoothing = 10^value"
        )
        var_smoothing = 10 ** var_smoothing_exp
        st.write(f"当前值: {var_smoothing:.2e}")

        sample_idx = st.slider("选择样本索引", 0, len(X_test) - 1, 0)
        sample = X_test[sample_idx].reshape(1, -1)

        model = GaussianNB(var_smoothing=var_smoothing)
        model.fit(X_train, y_train)

        # Overall accuracy on the held-out 30% split.
        y_pred = model.predict(X_test)
        accuracy = accuracy_score(y_test, y_pred)
        st.metric("准确率", f"{accuracy:.4f}")

        # Per-class posterior probabilities for the selected sample.
        probas = model.predict_proba(sample)[0]
        st.write("样本特征:", sample[0])
        st.write("真实标签:", y_test[sample_idx])
        st.write("预测概率:", probas)
        st.write("预测标签:", np.argmax(probas))

        fig, ax = plt.subplots(figsize=(8, 4))
        bars = ax.bar(range(len(probas)), probas)
        ax.set_xlabel('类别')
        ax.set_ylabel('概率')
        ax.set_title('样本的类别概率分布')
        ax.set_xticks(range(len(probas)))
        ax.set_xticklabels(target_names)
        # Label each bar with its probability value.
        for i, bar in enumerate(bars):
            height = bar.get_height()
            ax.text(bar.get_x() + bar.get_width() / 2., height, f'{probas[i]:.3f}',
                    ha='center', va='bottom')
        st.pyplot(fig)
        # Close the figure: Streamlit reruns the whole script on every widget
        # interaction, so unclosed figures accumulate and leak memory.
        plt.close(fig)

    with col2:
        feature_x = st.selectbox(
            "X轴特征", range(len(feature_names)), format_func=lambda x: feature_names[x], index=0
        )
        feature_y = st.selectbox(
            "Y轴特征", range(len(feature_names)), format_func=lambda x: feature_names[x], index=1
        )

        X_selected = X[:, [feature_x, feature_y]]

        # Retrain on only the two selected features so the 2-D boundary is exact.
        # Test portions of the split are not needed here.
        X_train_sel, _, y_train_sel, _ = train_test_split(
            X_selected, y, test_size=0.3, random_state=42
        )

        model_sel = GaussianNB(var_smoothing=var_smoothing)
        model_sel.fit(X_train_sel, y_train_sel)

        # Dense grid over the feature plane, predicted class at every point.
        h = 0.02  # grid step
        x_min, x_max = X_selected[:, 0].min() - 1, X_selected[:, 0].max() + 1
        y_min, y_max = X_selected[:, 1].min() - 1, X_selected[:, 1].max() + 1
        xx, yy = np.meshgrid(
            np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)
        )
        Z = model_sel.predict(np.c_[xx.ravel(), yy.ravel()])
        Z = Z.reshape(xx.shape)

        # Use the object-oriented API consistently (the original mixed plt.*
        # global-state calls with a created fig/ax, relying on the implicit
        # "current figure" — fragile when multiple figures exist per rerun).
        fig2, ax2 = plt.subplots(figsize=(10, 8))
        ax2.contourf(xx, yy, Z, alpha=0.4, cmap=plt.cm.RdYlBu)
        scatter = ax2.scatter(
            X_selected[:, 0], X_selected[:, 1], c=y, s=20, edgecolor='k', cmap=plt.cm.RdYlBu
        )
        fig2.colorbar(scatter, ax=ax2)
        ax2.set_title("朴素贝叶斯决策边界")
        ax2.set_xlabel(feature_names[feature_x])
        ax2.set_ylabel(feature_names[feature_y])
        st.pyplot(fig2)
        plt.close(fig2)  # see note above: avoid figure accumulation across reruns