import numpy as np
import matplotlib.pyplot as plt
import streamlit as st
from matplotlib import cm

st.title("深度学习平台")

# Seed session state on first run so widget reruns don't wipe model state:
# w/b are the model parameters, data holds the parsed points, and the two
# history lists feed the loss/trajectory plots.
for _key, _default in [
    ('w', 0),
    ('b', 0),
    ('data', None),
    ('loss_history', []),
    ('params_history', []),
]:
    if _key not in st.session_state:
        st.session_state[_key] = _default

# Input for data points
input_data = st.text_input("请输入数据点（格式：x1,y1;x2,y2;...）:", value="0,0;1,1")

# Parse input data
if input_data:
    try:
        points = [point.split(',') for point in input_data.split(';')]
        data = np.array(points, dtype=float)
        st.session_state.data = data
        st.write(f"已输入 {len(data)} 个数据点")
    except:
        st.error("输入格式错误，请使用格式：x1,y1;x2,y2;...")

# Only proceed if we have data
if st.session_state.data is not None:
    x_data = st.session_state.data[:, 0]
    y_data = st.session_state.data[:, 1]

    # Hyperparameters
    lr = st.sidebar.slider("学习率", 0.001, 1.0, 0.01)
    epochs = st.sidebar.slider("训练轮数", 1, 1000, 100)


    def sigmoid(x):
        """Numerically stable logistic function 1 / (1 + e^-x).

        Fix: the naive form np.exp(-x) overflows (RuntimeWarning, inf) for
        large-magnitude negative x. Using e^-|x|, which is always <= 1,
        evaluates both branches without overflow:
          x >= 0:  1 / (1 + e^-x)
          x <  0:  e^x / (1 + e^x)  ==  e^-|x| / (1 + e^-|x|)
        """
        x = np.asarray(x, dtype=float)
        safe_exp = np.exp(-np.abs(x))  # in (0, 1], never overflows
        return np.where(x >= 0, 1.0 / (1.0 + safe_exp), safe_exp / (1.0 + safe_exp))


    def f(w, x, b):
        """Affine map w*x + b (the pre-activation of the model).

        NOTE(review): not referenced by the button handlers below — confirm
        whether it is still needed.
        """
        linear_out = w * x
        return linear_out + b


    def loss_fn(y_true, y_pre):
        """Mean squared error between targets and predictions."""
        residual = y_true - y_pre
        return np.mean(np.square(residual))


    # Buttons for operations
    col1, col2, col3 = st.columns(3)

    with col1:
        if st.button("前向传播"):
            # Forward pass; cache the activations so the backward-pass button
            # can reuse them on a later rerun.
            preds = sigmoid(st.session_state.w * x_data + st.session_state.b)
            st.session_state.a = preds
            st.write("前向传播完成")
            st.write(f"预测值: {preds}")

    with col2:
        if st.button("反向传播"):
            if 'a' not in st.session_state:
                st.error("请先进行前向传播")
            else:
                a = st.session_state.a

                # Chain rule through MSE and the sigmoid:
                # dL/dw = mean(dL/da * da/dz * x),  dz/db = 1.
                deda = -2 * (y_data - a)   # MSE derivative w.r.t. predictions
                dadz = a * (1 - a)         # sigmoid derivative expressed via a

                grad_w = np.mean(deda * dadz * x_data)
                grad_b = np.mean(deda * dadz)

                # One gradient-descent step on the stored parameters.
                st.session_state.w -= lr * grad_w
                st.session_state.b -= lr * grad_b

                # Record the pre-update loss and the post-update parameters.
                st.session_state.loss_history.append(loss_fn(y_data, a))
                st.session_state.params_history.append((st.session_state.w, st.session_state.b))

                st.write(f"更新后的参数: w={st.session_state.w:.4f}, b={st.session_state.b:.4f}")

    with col3:
        if st.button("训练模型"):
            # Fresh run: wipe history and restart from w = b = 0 (deliberately
            # ignores parameters left over from the single-step buttons).
            st.session_state.loss_history = []
            st.session_state.params_history = []

            weight, bias = 0, 0

            for _ in range(epochs):
                # Forward pass.
                preds = sigmoid(weight * x_data + bias)

                # Backward pass: chain rule through MSE and the sigmoid.
                deda = -2 * (y_data - preds)
                dadz = preds * (1 - preds)

                grad_w = np.mean(deda * dadz * x_data)
                grad_b = np.mean(deda * dadz)

                # Gradient-descent update.
                weight -= lr * grad_w
                bias -= lr * grad_b

                # Record the pre-update loss and the post-update parameters.
                st.session_state.loss_history.append(loss_fn(y_data, preds))
                st.session_state.params_history.append((weight, bias))

            # Publish the final parameters for the other buttons and plots.
            st.session_state.w = weight
            st.session_state.b = bias
            st.write(f"训练完成! 最终参数: w={weight:.4f}, b={bias:.4f}")
            st.write(f"最终损失: {st.session_state.loss_history[-1]:.4f}")

    # Visualization buttons (the commented block below is an older single-panel
    # version of the scatter plot, superseded by the two-panel version that follows)
    # if st.button("散点图和拟合曲线"):
    #     if st.session_state.data is None or len(st.session_state.data) == 0:
    #         st.warning("请先输入数据点")
    #     else:
    #         fig, ax = plt.subplots(figsize=(10, 6))
    #
    #         # 绘制原始数据点
    #         ax.scatter(x_data, y_data, color="blue", label="数据点", s=50, alpha=0.7)
    #
    #         # 生成拟合曲线
    #         x_min, x_max = x_data.min() - 0.1, x_data.max() + 0.1
    #         x_values = np.linspace(x_min, x_max, 100)
    #         y_values = sigmoid(st.session_state.w * x_values + st.session_state.b)
    #
    #         # 绘制拟合曲线
    #         ax.plot(x_values, y_values, "r-", linewidth=2, label="Sigmoid拟合曲线")
    #
    #         # 添加预测值点（如果已经进行过前向传播）
    #         if 'a' in st.session_state:
    #             ax.scatter(x_data, st.session_state.a, color="green", marker='x',
    #                        s=60, label="预测值", alpha=0.8)
    #
    #         # 设置图表属性
    #         ax.set_xlabel("x", fontsize=12)
    #         ax.set_ylabel("y", fontsize=12)
    #         ax.set_title("数据点和Sigmoid拟合曲线", fontsize=14)
    #         ax.legend(loc='best')
    #         ax.grid(True, alpha=0.3)
    #
    #         # 显示参数信息
    #         eq_text = f"y = σ({st.session_state.w:.4f}x + {st.session_state.b:.4f})"
    #         ax.text(0.05, 0.95, eq_text, transform=ax.transAxes, fontsize=10,
    #                 verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))
    #
    #         st.pyplot(fig)
    #
    #         # 显示模型评估指标（如果已经进行过前向传播）
    #         if 'a' in st.session_state:
    #             mse = np.mean((y_data - st.session_state.a) ** 2)
    #             st.write(f"均方误差 (MSE): {mse:.4f}")
    if st.button("散点图和拟合曲线"):
        if st.session_state.data is None or len(st.session_state.data) == 0:
            st.warning("请先输入数据点")
        else:
            # Build a figure with two side-by-side panels: sigmoid fit (left)
            # and the linear pre-activation (right).
            # NOTE(review): CJK axis labels need a configured matplotlib CJK
            # font to render — confirm they are not shown as boxes.
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))

            # Panel 1: original data points plus the fitted sigmoid curve.
            ax1.scatter(x_data, y_data, color="blue", label="数据点", s=50, alpha=0.7)

            # Sample the fitted curve on a dense grid slightly wider than the data range.
            x_min, x_max = x_data.min() - 0.1, x_data.max() + 0.1
            x_values = np.linspace(x_min, x_max, 100)
            y_values_sigmoid = sigmoid(st.session_state.w * x_values + st.session_state.b)
            ax1.plot(x_values, y_values_sigmoid, "r-", linewidth=2, label="Sigmoid拟合曲线")

            # Overlay the cached predictions if a forward pass has been run.
            # NOTE(review): st.session_state.a may be stale if the data was
            # re-entered after the last forward pass — confirm this is intended.
            if 'a' in st.session_state:
                ax1.scatter(x_data, st.session_state.a, color="green", marker='x',
                            s=60, label="预测值", alpha=0.8)

            # Style the first panel.
            ax1.set_xlabel("x", fontsize=12)
            ax1.set_ylabel("y", fontsize=12)
            ax1.set_title("Sigmoid拟合曲线", fontsize=14)
            ax1.legend(loc='best')
            ax1.grid(True, alpha=0.3)

            # Annotate the fitted equation in the top-left corner (axes coords).
            eq_text = f"y = σ({st.session_state.w:.4f}x + {st.session_state.b:.4f})"
            ax1.text(0.05, 0.95, eq_text, transform=ax1.transAxes, fontsize=10,
                     verticalalignment='top', bbox=dict(boxstyle='round', facecolor='wheat', alpha=0.5))

            # Panel 2: the same data against the linear part z = w*x + b.
            ax2.scatter(x_data, y_data, color="blue", label="数据点", s=50, alpha=0.7)

            # Plot the linear pre-activation over the same dense x grid.
            y_values_linear = st.session_state.w * x_values + st.session_state.b
            ax2.plot(x_values, y_values_linear, "purple", linewidth=2, label="线性部分: w*x + b")

            # Horizontal reference line at z = 0 (where sigmoid(z) = 0.5).
            ax2.axhline(y=0, color='gray', linestyle='--', alpha=0.7)

            # Style the second panel.
            ax2.set_xlabel("x", fontsize=12)
            ax2.set_ylabel("z = w*x + b", fontsize=12)
            ax2.set_title("线性部分", fontsize=14)
            ax2.legend(loc='best')
            ax2.grid(True, alpha=0.3)

            # Avoid label overlap between the two panels.
            plt.tight_layout()

            # Render the figure in the Streamlit app.
            st.pyplot(fig)

            # Evaluation metrics, available only after a forward pass.
            if 'a' in st.session_state:
                st.subheader("模型评估")
                col1, col2, col3 = st.columns(3)
                with col1:
                    mse = np.mean((y_data - st.session_state.a) ** 2)
                    st.metric("均方误差 (MSE)", f"{mse:.4f}")
                with col2:
                    # Binary-classification accuracy, shown only when every
                    # label is 0 or 1 (threshold the predictions at 0.5).
                    if set(y_data).issubset({0, 1}):
                        predictions = (st.session_state.a > 0.5).astype(int)
                        accuracy = np.mean(predictions == y_data)
                        st.metric("准确率", f"{accuracy:.2%}")
                with col3:
                    st.metric("权重 (w)", f"{st.session_state.w:.4f}")
                    st.metric("偏置 (b)", f"{st.session_state.b:.4f}")

    if st.button("损失函数历史"):
        if st.session_state.loss_history:
            # One loss value per recorded parameter update.
            fig, axis = plt.subplots(figsize=(10, 6))
            axis.plot(st.session_state.loss_history)
            axis.set_xlabel("迭代次数")
            axis.set_ylabel("损失值")
            axis.set_title("损失函数变化")
            axis.grid(True)
            st.pyplot(fig)
        else:
            st.error("请先训练模型或进行反向传播")

    if st.button("梯度下降3D图"):
        # Evaluate the MSE loss over a 50x50 (w, b) grid. Broadcasting
        # (grid[..., None] * x_data) replaces the former Python double loop:
        # shape (50, 50, n_samples), then mean over the sample axis.
        w_values = np.linspace(-10, 10, 50)
        b_values = np.linspace(-10, 10, 50)
        W, B = np.meshgrid(w_values, b_values)
        z_grid = W[..., np.newaxis] * x_data + B[..., np.newaxis]
        loss_values = np.mean((y_data - sigmoid(z_grid)) ** 2, axis=-1)

        # 3D surface of the loss landscape.
        fig = plt.figure(figsize=(12, 8))
        ax = fig.add_subplot(111, projection='3d')
        surf = ax.plot_surface(W, B, loss_values, cmap=cm.viridis, alpha=0.8)

        # Overlay the recent optimization path.
        if st.session_state.params_history:
            # Show at most the last 20 points to avoid clutter
            # ([-20:] already returns the whole list when it is shorter).
            recent_params = st.session_state.params_history[-20:]
            w_vals = [p[0] for p in recent_params]
            b_vals = [p[1] for p in recent_params]
            loss_vals = [loss_fn(y_data, sigmoid(w * x_data + b)) for w, b in recent_params]

            ax.scatter(w_vals, b_vals, loss_vals, color='red', s=50, marker='o')

        ax.set_xlabel('权重 w')
        ax.set_ylabel('偏置 b')
        ax.set_zlabel('损失')
        ax.set_title('损失函数曲面和梯度下降路径')
        fig.colorbar(surf, ax=ax, shrink=0.5, aspect=5)
        st.pyplot(fig)

    if st.button("等高线图"):
        # Evaluate the MSE loss over a 50x50 (w, b) grid via broadcasting
        # instead of the former nested Python loop: shape (50, 50, n_samples),
        # then mean over the sample axis.
        w_values = np.linspace(-10, 10, 50)
        b_values = np.linspace(-10, 10, 50)
        W, B = np.meshgrid(w_values, b_values)
        z_grid = W[..., np.newaxis] * x_data + B[..., np.newaxis]
        loss_values = np.mean((y_data - sigmoid(z_grid)) ** 2, axis=-1)

        # Labeled contour plot of the loss landscape.
        fig, ax = plt.subplots(figsize=(10, 8))
        contour = ax.contour(W, B, loss_values, 20, cmap=cm.viridis)
        ax.clabel(contour, inline=True, fontsize=8)

        # Overlay the recent optimization path.
        if st.session_state.params_history:
            # Show at most the last 20 points to avoid clutter
            # ([-20:] already returns the whole list when it is shorter).
            recent_params = st.session_state.params_history[-20:]
            w_vals = [p[0] for p in recent_params]
            b_vals = [p[1] for p in recent_params]

            ax.plot(w_vals, b_vals, 'ro-', markersize=4, linewidth=1)
            # Emphasize the most recent parameter position.
            ax.plot(w_vals[-1], b_vals[-1], 'ro', markersize=8)

        ax.set_xlabel('权重 w')
        ax.set_ylabel('偏置 b')
        ax.set_title('损失函数等高线图和梯度下降路径')
        plt.colorbar(contour, ax=ax)
        st.pyplot(fig)



# import numpy as np
# import matplotlib.pyplot as plt
# import streamlit as st
#
# # -------------------------- 1. 页面初始化与全局状态管理 --------------------------
# st.title("📊 深度学习平台（逻辑回归可视化）")
#
# # 初始化SessionState，存储全局变量（避免页面刷新丢失）
# if 'initialized' not in st.session_state:
#     # 模型参数
#     st.session_state.w = 0.0  # 权重
#     st.session_state.b = 0.0  # 偏置
#     st.session_state.lr = 0.01  # 学习率
#     st.session_state.epochs = 100  # 训练轮次
#     st.session_state.loss_history = []  # 损失函数历史（用于可视化）
#     # 数据相关（关键修复：用独立状态存储解析后的数据，不修改widget关联状态）
#     st.session_state.x_data = None  # 输入特征（解析后）
#     st.session_state.y_data = None  # 标签（0/1，适配sigmoid，解析后）
#     # 图表相关
#     st.session_state.fig = None  # 全局图表对象
#     st.session_state.W = None  # 3D图权重网格
#     st.session_state.B = None  # 3D图偏置网格
#     st.session_state.loss_values = None  # 3D图损失值网格
#     # 标记初始化完成
#     st.session_state.initialized = True
#
#
# # -------------------------- 2. 核心函数定义 --------------------------
# def sigmoid(x):
#     """sigmoid激活函数：将输出映射到[0,1]"""
#     return 1 / (1 + np.exp(-x))
#
#
# def forward_propagation(w, b, x):
#     """前向传播：计算模型预测值"""
#     z = w * x + b
#     a = sigmoid(z)
#     return z, a
#
#
# def loss_fn(y_true, y_pre):
#     """损失函数：均方误差（MSE），适配分类任务的连续预测值"""
#     return np.mean((y_true - y_pre) ** 2)
#
#
# def backward_propagation(w, b, x, y_true, y_pre):
#     """反向传播：计算权重和偏置的梯度"""
#     m = len(x)  # 样本数量
#     # 链式求导（MSE损失对w、b的梯度）
#     deda = -2 * (y_true - y_pre)  # 损失对预测值a的导数
#     dadz = y_pre * (1 - y_pre)  # a对z的导数（sigmoid导数特性）
#     dzdw = x  # z对w的导数
#     dzdb = 1  # z对b的导数
#     # 平均梯度（避免样本数量影响）
#     grad_w = np.mean(deda * dadz * dzdw)
#     grad_b = np.mean(deda * dadz * dzdb)
#     return grad_w, grad_b
#
#
# def init_3d_grid(w_range=(-20, 80), b_range=(-20, 80), num_points=100):
#     """初始化3D图和等高线图的权重、偏置网格"""
#     w_values = np.linspace(w_range[0], w_range[1], num_points)
#     b_values = np.linspace(b_range[0], b_range[1], num_points)
#     W, B = np.meshgrid(w_values, b_values)
#     # 计算每个(w,b)对应的损失值
#     loss_values = np.zeros_like(W)
#     for i in range(len(w_values)):
#         for j in range(len(b_values)):
#             _, y_pre = forward_propagation(w_values[i], b_values[j], st.session_state.x_data)
#             loss_values[j, i] = loss_fn(st.session_state.y_data, y_pre)
#     return W, B, loss_values
#
#
# # -------------------------- 3. 数据输入模块（核心修复点） --------------------------
# st.subheader("1. 数据输入（逻辑回归二分类数据）")
# st.info("请输入二维数据点（每行1个样本，特征x与标签y用逗号分隔，样本间用分号分隔）")
# st.info("示例：1.2,0;2.5,0;3.1,1;4.8,1;5.3,1（x为特征，y为0/1标签）")
#
# # 用户数据输入框（关键：不修改该widget关联的st.session_state.data_input）
# data_input = st.text_area(
#     "数据点输入",
#     value="1.2,0;2.5,0;3.1,1;4.8,1;5.3,1",  # 默认示例数据，避免空输入
#     height=100,
#     key="data_input"  # widget的key，其关联状态不可修改
# )
#
# # 数据解析按钮
# if st.button("📥 解析数据", key="parse_data"):
#     try:
#         # 清空历史数据（仅清空解析后的结果，不修改widget输入）
#         st.session_state.x_data = None
#         st.session_state.y_data = None
#         st.session_state.loss_history = []
#
#         # 解析输入（分号分隔样本，逗号分隔x和y）
#         samples = [s.strip() for s in data_input.split(";") if s.strip()]
#         if not samples:
#             st.error("请输入有效的数据点！")
#             st.stop()
#
#         # 提取x和y（确保y是0/1标签，适配sigmoid分类）
#         x_list = []
#         y_list = []
#         for idx, sample in enumerate(samples):
#             if "," not in sample:
#                 st.error(f"第{idx + 1}个样本格式错误（需用逗号分隔x和y）：{sample}")
#                 st.stop()
#             x_str, y_str = sample.split(",")
#             x = float(x_str.strip())
#             y = float(y_str.strip())
#             # 校验y是否为0/1（逻辑回归二分类）
#             if y not in (0.0, 1.0):
#                 st.error(f"第{idx + 1}个样本标签错误（仅支持0/1）：{y}")
#                 st.stop()
#             x_list.append(x)
#             y_list.append(y)
#
#         # 转换为NumPy数组并存储（仅存储解析结果，不修改widget输入状态）
#         st.session_state.x_data = np.array(x_list)
#         st.session_state.y_data = np.array(y_list)
#         st.success(f"✅ 数据解析成功！共{len(samples)}个样本")
#         st.write(f"特征x：{st.session_state.x_data}")
#         st.write(f"标签y：{st.session_state.y_data}")
#
#         # 初始化3D网格（为后续3D图/等高线图准备）
#         if st.session_state.x_data is not None:
#             st.session_state.W, st.session_state.B, st.session_state.loss_values = init_3d_grid()
#
#     except Exception as e:
#         st.error(f"数据解析失败：{str(e)}")
#
# # -------------------------- 4. 模型操作模块 --------------------------
# st.subheader("2. 模型操作（逻辑回归训练与可视化）")
# # 检查数据是否已解析
# if st.session_state.x_data is None:
#     st.warning("⚠️ 请先在「数据输入」模块解析有效的二分类数据！")
# else:
#     # 创建操作按钮（一行排列，美观紧凑）
#     col1, col2, col3 = st.columns(3)
#     with col1:
#         train_btn = st.button("▶️ 开始训练（前向+反向传播）", key="train_btn")
#         reset_btn = st.button("🔄 重置模型参数", key="reset_btn")
#     with col2:
#         show_scatter_btn = st.button("📈 显示散点图（含拟合曲线）", key="scatter_btn")
#         show_loss_btn = st.button("📉 显示损失函数曲线", key="loss_btn")
#     with col3:
#         show_3d_btn = st.button("🔺 显示梯度下降3D图", key="3d_btn")
#         show_contour_btn = st.button("🔵 显示损失等高线图", key="contour_btn")
#
#     # -------------------------- 4.1 模型重置 --------------------------
#     if reset_btn:
#         st.session_state.w = 0.0
#         st.session_state.b = 0.0
#         st.session_state.loss_history = []
#         st.success("✅ 模型参数已重置（w=0, b=0）")
#
#     # -------------------------- 4.2 模型训练（前向+反向传播） --------------------------
#     if train_btn:
#         w = st.session_state.w
#         b = st.session_state.b
#         lr = st.session_state.lr
#         epochs = st.session_state.epochs
#         x = st.session_state.x_data
#         y_true = st.session_state.y_data
#         loss_history = []
#
#         # 训练进度条
#         progress_bar = st.progress(0)
#         status_text = st.empty()
#
#         for epoch in range(epochs):
#             # 前向传播：计算预测值和损失
#             _, y_pre = forward_propagation(w, b, x)
#             loss = loss_fn(y_true, y_pre)
#             loss_history.append(loss)
#
#             # 反向传播：计算梯度并更新参数
#             grad_w, grad_b = backward_propagation(w, b, x, y_true, y_pre)
#             w -= lr * grad_w
#             b -= lr * grad_b
#
#             # 更新进度
#             progress = (epoch + 1) / epochs
#             progress_bar.progress(progress)
#             status_text.text(f"训练中：第{epoch + 1}/{epochs}轮，当前损失：{loss:.4f}")
#
#         # 训练完成，更新全局状态
#         st.session_state.w = w
#         st.session_state.b = b
#         st.session_state.loss_history = loss_history
#         progress_bar.empty()
#         status_text.empty()
#         st.success(f"✅ 训练完成！最终参数：w={w:.4f}, b={b:.4f}，最终损失：{loss:.4f}")
#
#     # -------------------------- 4.3 图表可视化 --------------------------
#     # 初始化全局图表（避免重复创建子图导致混乱）
#     if st.session_state.fig is None:
#         st.session_state.fig = plt.figure(figsize=(14, 8))
#         plt.rcParams["font.family"] = ["SimHei", "WenQuanYi Micro Hei"]  # 中文支持
#
#     # 清除现有子图（避免图表叠加）
#     for ax in st.session_state.fig.get_axes():
#         st.session_state.fig.delaxes(ax)
#
#     # 散点图（数据分布+拟合曲线）
#     if show_scatter_btn:
#         ax1 = st.session_state.fig.add_subplot(111)
#         # 绘制数据散点
#         ax1.scatter(
#             st.session_state.x_data,
#             st.session_state.y_data,
#             color="blue",
#             s=60,
#             label="原始数据"
#         )
#         # 绘制拟合曲线（覆盖x的取值范围）
#         x_min, x_max = st.session_state.x_data.min() - 0.5, st.session_state.x_data.max() + 0.5
#         x_plot = np.linspace(x_min, x_max, 100)
#         _, y_plot = forward_propagation(
#             st.session_state.w,
#             st.session_state.b,
#             x_plot
#         )
#         ax1.plot(
#             x_plot,
#             y_plot,
#             color="red",
#             linewidth=2,
#             label=f"拟合曲线（w={st.session_state.w:.4f}, b={st.session_state.b:.4f}）"
#         )
#         # 图表美化
#         ax1.set_xlabel("特征 x", fontsize=12)
#         ax1.set_ylabel("标签 y（0/1）", fontsize=12)
#         ax1.set_title("逻辑回归数据分布与拟合曲线", fontsize=14)
#         ax1.legend(fontsize=10)
#         ax1.grid(alpha=0.3)
#         # 显示图表
#         st.pyplot(st.session_state.fig)
#
#     # 损失函数曲线
#     if show_loss_btn:
#         if not st.session_state.loss_history:
#             st.warning("⚠️ 请先点击「开始训练」生成损失历史！")
#         else:
#             ax2 = st.session_state.fig.add_subplot(111)
#             ax2.plot(
#                 range(1, len(st.session_state.loss_history) + 1),
#                 st.session_state.loss_history,
#                 color="green",
#                 linewidth=2,
#                 marker="o",
#                 markersize=3
#             )
#             # 图表美化
#             ax2.set_xlabel("训练轮次（Epochs）", fontsize=12)
#             ax2.set_ylabel("均方误差损失（MSE）", fontsize=12)
#             ax2.set_title("训练过程损失函数变化", fontsize=14)
#             ax2.grid(alpha=0.3)
#             # 标注最终损失
#             final_loss = st.session_state.loss_history[-1]
#             ax2.text(
#                 0.7, 0.9,
#                 f"最终损失：{final_loss:.4f}",
#                 transform=ax2.transAxes,
#                 bbox=dict(boxstyle="round", facecolor="wheat", alpha=0.8),
#                 fontsize=10
#             )
#             st.pyplot(st.session_state.fig)
#
#     # 梯度下降3D图（损失函数曲面）
#     if show_3d_btn:
#         if st.session_state.W is None:
#             st.warning("⚠️ 3D网格未初始化，请先解析数据！")
#         else:
#             ax3 = st.session_state.fig.add_subplot(111, projection="3d")
#             # 绘制3D损失曲面
#             surf = ax3.plot_surface(
#                 st.session_state.W,
#                 st.session_state.B,
#                 st.session_state.loss_values,
#                 cmap="viridis",  # 颜色映射
#                 alpha=0.8,  # 透明度
#                 edgecolor="none"
#             )
#             # 标记当前模型参数对应的位置（黑色点）
#             current_loss = loss_fn(
#                 st.session_state.y_data,
#                 forward_propagation(st.session_state.w, st.session_state.b, st.session_state.x_data)[1]
#             )
#             ax3.scatter(
#                 st.session_state.w,
#                 st.session_state.b,
#                 current_loss,
#                 color="black",
#                 s=100,
#                 marker="o",
#                 label=f"当前参数（w={st.session_state.w:.2f}, b={st.session_state.b:.2f}）"
#             )
#             # 图表美化
#             ax3.set_xlabel("权重 w", fontsize=12)
#             ax3.set_ylabel("偏置 b", fontsize=12)
#             ax3.set_zlabel("损失值（MSE）", fontsize=12)
#             ax3.set_title("逻辑回归损失函数3D曲面", fontsize=14)
#             ax3.legend(fontsize=10)
#             # 添加颜色条
#             st.session_state.fig.colorbar(surf, ax=ax3, shrink=0.5, aspect=10, label="损失值")
#             st.pyplot(st.session_state.fig)
#
#     # 损失等高线图
#     if show_contour_btn:
#         if st.session_state.W is None:
#             st.warning("⚠️ 等高线网格未初始化，请先解析数据！")
#         else:
#             ax4 = st.session_state.fig.add_subplot(111)
#             # 绘制等高线（levels=20表示等高线密度）
#             contour = ax4.contour(
#                 st.session_state.W,
#                 st.session_state.B,
#                 st.session_state.loss_values,
#                 levels=20,
#                 cmap="viridis"
#             )
#             # 添加等高线标签
#             ax4.clabel(contour, inline=True, fontsize=8, fmt="%.2f")
#             # 标记当前参数位置（红色点）
#             ax4.scatter(
#                 st.session_state.w,
#                 st.session_state.b,
#                 color="red",
#                 s=100,
#                 marker="*",
#                 label=f"当前参数（w={st.session_state.w:.2f}, b={st.session_state.b:.2f}）"
#             )
#             # 图表美化
#             ax4.set_xlabel("权重 w", fontsize=12)
#             ax4.set_ylabel("偏置 b", fontsize=12)
#             ax4.set_title("逻辑回归损失函数等高线图", fontsize=14)
#             ax4.legend(fontsize=10)
#             ax4.grid(alpha=0.3)
#             st.pyplot(st.session_state.fig)
#
# # -------------------------- 5. 帮助说明 --------------------------
# st.subheader("3. 使用说明")
# st.markdown("""
# 1. **数据输入规则**：
#    - 输入格式：`x1,y1;x2,y2;...;xn,yn`（分号分隔样本，逗号分隔x和y）
#    - 标签限制：y必须是0或1（逻辑回归二分类任务）
#    - 示例：`1.2,0;2.5,0;3.1,1;4.8,1;5.3,1`
#
# 2. **模型操作流程**：
#    - 步骤1：输入数据并点击「解析数据」
#    - 步骤2：点击「开始训练」（自动执行前向+反向传播，共100轮）
#    - 步骤3：选择需要的可视化图表（散点图、损失曲线、3D图、等高线图）
#
# 3. **核心功能**：
#    - 前向传播：计算sigmoid预测值
#    - 反向传播：基于MSE损失计算梯度并更新参数
#    - 多维度可视化：直观展示模型训练过程和损失分布
# """)