import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import plotly.graph_objects as go
from plotly.subplots import make_subplots


def show():
    """Render the interactive "Regularization Techniques" Streamlit page.

    The page: (1) shows a comparison table of common regularization
    techniques, (2) lets the user pick one technique plus hyperparameters,
    (3) on demand trains two small PyTorch classifiers (with/without the
    chosen regularization) on a synthetic dataset and plots the loss/
    accuracy curves and weight distributions with Plotly, and (4) shows a
    matching PyTorch code example rendered from the current slider values.

    All output goes through Streamlit; the function returns nothing.
    """
    st.markdown('<h2 class="sub-header">正则化技术</h2>', unsafe_allow_html=True)

    st.markdown("""
    <div class="content">
        <p>正则化技术用于防止模型过拟合，提高模型的泛化能力。</p>
        <p>通过添加约束或惩罚项，正则化可以限制模型的复杂度，使其更倾向于学习简单的模式。</p>
    </div>
    """, unsafe_allow_html=True)

    # Comparison table of regularization techniques.
    st.subheader("正则化技术对比")

    st.markdown("""
    | 技术 | 核心思想 | 优点 | 缺点 | 适用场景 |
    |------|---------|------|------|---------|
    | L1正则化 (Lasso) | 添加权重的绝对值之和作为惩罚项 | 产生稀疏权重，可用于特征选择 | 可能过度稀疏化，丢失重要特征 | 特征选择，高维数据 |
    | L2正则化 (Ridge) | 添加权重的平方和作为惩罚项 | 平滑权重分布，防止过拟合 | 不会产生稀疏解 | 大多数回归和分类问题 |
    | Dropout | 随机丢弃神经元 | 防止神经元共适应，提高泛化能力 | 训练时间增加，需要更多迭代 | 深度神经网络 |
    | 早停 (Early Stopping) | 在验证集性能不再提升时停止训练 | 简单有效，无需修改模型 | 需要验证集，可能过早停止 | 各种模型训练 |
    | 数据增强 | 增加训练数据的多样性 | 提高模型泛化能力，无需修改模型 | 可能引入不真实的样本 | 图像、文本等数据 |
    | 批量归一化 (BatchNorm) | 标准化每层的输入 | 加速训练，提高稳定性 | 增加计算复杂度，小批量上效果差 | 深度神经网络 |
    | 权重衰减 | 在优化过程中直接衰减权重 | 等价于L2正则化，实现简单 | 需要调整衰减率 | 各种神经网络 |
    """)

    # Let the user pick which regularization technique to demonstrate.
    st.subheader("正则化技术演示")

    regularization_type = st.selectbox(
        "选择正则化技术",
        ["L1正则化", "L2正则化", "Dropout", "早停", "批量归一化", "权重衰减"]
    )

    # Synthetic binary-classification dataset (fixed seed for reproducibility).
    X, y = make_classification(
        n_samples=1000, n_features=20, n_informative=5,
        n_redundant=5, random_state=42
    )

    # Standardize features, then split into train / validation / test sets.
    scaler = StandardScaler()
    X = scaler.fit_transform(X)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)

    # Convert the numpy splits to PyTorch tensors (long labels for CrossEntropyLoss).
    X_train_tensor = torch.FloatTensor(X_train)
    y_train_tensor = torch.LongTensor(y_train)
    X_val_tensor = torch.FloatTensor(X_val)
    y_val_tensor = torch.LongTensor(y_val)
    X_test_tensor = torch.FloatTensor(X_test)
    y_test_tensor = torch.LongTensor(y_test)

    # Training hyperparameters selected by the user.
    col1, col2 = st.columns(2)
    with col1:
        hidden_units = st.slider("隐藏层神经元数量", 16, 256, 64, 16)
        learning_rate = st.slider("学习率", 0.0001, 0.1, 0.01, 0.0001, format="%.4f")
    with col2:
        epochs = st.slider("训练轮数", 10, 500, 100, 10)
        # NOTE(review): this value is collected but never used below — training
        # is full-batch (one optimizer step per epoch). Kept for UI stability;
        # consider wiring it into a DataLoader or removing the widget.
        batch_size = st.slider("批处理大小", 16, 256, 64, 16)

    # Technique-specific hyperparameter. Only the slider for the selected
    # technique is shown, so e.g. `reg_strength` exists only when needed.
    if regularization_type in ["L1正则化", "L2正则化", "权重衰减"]:
        reg_strength = st.slider("正则化强度", 0.0001, 0.1, 0.01, 0.0001, format="%.4f")
    elif regularization_type == "Dropout":
        dropout_rate = st.slider("Dropout率", 0.1, 0.9, 0.5, 0.1)
    elif regularization_type == "早停":
        patience = st.slider("耐心值", 1, 20, 5, 1)
    elif regularization_type == "批量归一化":
        momentum = st.slider("动量", 0.1, 0.9, 0.1, 0.1)

    def create_model(use_dropout=False, use_batchnorm=False, dropout_rate=0.5):
        """Build a 2-layer MLP classifier, optionally with BatchNorm/Dropout.

        Architecture: Linear -> [BatchNorm1d] -> ReLU -> [Dropout] -> Linear(2).
        Relies on the enclosing scope for `hidden_units` (and `momentum` when
        `use_batchnorm` is True).
        """
        layers = []
        layers.append(nn.Linear(X.shape[1], hidden_units))

        if use_batchnorm:
            layers.append(nn.BatchNorm1d(hidden_units, momentum=momentum))

        layers.append(nn.ReLU())

        if use_dropout:
            layers.append(nn.Dropout(dropout_rate))

        layers.append(nn.Linear(hidden_units, 2))
        return nn.Sequential(*layers)

    # Run the comparison only when the user clicks the button.
    if st.button("比较正则化效果"):
        # Build a regularized and an unregularized model for the chosen technique.
        if regularization_type == "L1正则化":
            model_with_reg = create_model()
            model_without_reg = create_model()

            # L1 penalty: strength * sum of L1 norms of all parameters
            # (added to the loss manually inside the training loop).
            def l1_regularization(model, strength):
                l1_loss = 0
                for param in model.parameters():
                    l1_loss += torch.norm(param, 1)
                return strength * l1_loss
        elif regularization_type == "L2正则化":
            model_with_reg = create_model()
            model_without_reg = create_model()
        elif regularization_type == "Dropout":
            model_with_reg = create_model(use_dropout=True, dropout_rate=dropout_rate)
            model_without_reg = create_model(use_dropout=False)
        elif regularization_type == "早停":
            model_with_reg = create_model()
            model_without_reg = create_model()
        elif regularization_type == "批量归一化":
            model_with_reg = create_model(use_batchnorm=True)
            model_without_reg = create_model(use_batchnorm=False)
        elif regularization_type == "权重衰减":
            model_with_reg = create_model()
            model_without_reg = create_model()

        # Optimizers. L2 regularization and weight decay are both realized via
        # SGD's `weight_decay` argument (they are equivalent for plain SGD).
        if regularization_type == "L2正则化" or regularization_type == "权重衰减":
            optimizer_with_reg = optim.SGD(model_with_reg.parameters(), lr=learning_rate, weight_decay=reg_strength)
            optimizer_without_reg = optim.SGD(model_without_reg.parameters(), lr=learning_rate)
        else:
            optimizer_with_reg = optim.SGD(model_with_reg.parameters(), lr=learning_rate)
            optimizer_without_reg = optim.SGD(model_without_reg.parameters(), lr=learning_rate)

        criterion = nn.CrossEntropyLoss()

        def train_model(model, optimizer, use_early_stopping=False):
            """Full-batch training loop; returns per-epoch loss/accuracy curves.

            One optimizer step per epoch on the whole training set. When
            `use_early_stopping` is True, stops after `patience` epochs
            without validation-loss improvement.
            """
            train_losses = []
            val_losses = []
            val_accuracies = []

            best_val_loss = float('inf')
            epochs_no_improve = 0

            for epoch in range(epochs):
                model.train()

                # Single full-batch training step.
                optimizer.zero_grad()
                outputs = model(X_train_tensor)
                loss = criterion(outputs, y_train_tensor)

                # Add the L1 penalty to the loss when demonstrating L1.
                if regularization_type == "L1正则化":
                    loss += l1_regularization(model, reg_strength)

                loss.backward()
                optimizer.step()

                train_losses.append(loss.item())

                # Validation pass (no gradients; eval mode disables Dropout
                # and switches BatchNorm to running statistics).
                model.eval()
                with torch.no_grad():
                    val_outputs = model(X_val_tensor)
                    val_loss = criterion(val_outputs, y_val_tensor)
                    val_losses.append(val_loss.item())

                    _, predicted = torch.max(val_outputs.data, 1)
                    val_accuracy = (predicted == y_val_tensor).sum().item() / y_val_tensor.size(0)
                    val_accuracies.append(val_accuracy)

                # Early-stopping check: count epochs without improvement.
                if use_early_stopping:
                    if val_loss < best_val_loss:
                        best_val_loss = val_loss
                        epochs_no_improve = 0
                    else:
                        epochs_no_improve += 1

                    if epochs_no_improve >= patience:
                        st.write(f"早停在 epoch {epoch + 1}")
                        break

            return train_losses, val_losses, val_accuracies

        # Train both models; early stopping applies only to the "with" model.
        st.write("训练有正则化的模型...")
        train_loss_with_reg, val_loss_with_reg, val_acc_with_reg = train_model(
            model_with_reg, optimizer_with_reg, use_early_stopping=(regularization_type == "早停")
        )

        st.write("训练无正则化的模型...")
        train_loss_without_reg, val_loss_without_reg, val_acc_without_reg = train_model(
            model_without_reg, optimizer_without_reg
        )

        # 2x2 Plotly figure: train loss, val loss, val accuracy, weight histogram.
        fig = make_subplots(
            rows=2, cols=2,
            subplot_titles=("训练损失", "验证损失", "验证准确率", "权重分布"),
            specs=[[{"type": "scatter"}, {"type": "scatter"}],
                   [{"type": "scatter"}, {"type": "histogram"}]]
        )

        # Training loss curves.
        fig.add_trace(
            go.Scatter(
                x=list(range(1, len(train_loss_with_reg) + 1)),
                y=train_loss_with_reg,
                mode="lines",
                name="有正则化",
                line=dict(color="blue")
            ),
            row=1, col=1
        )

        fig.add_trace(
            go.Scatter(
                x=list(range(1, len(train_loss_without_reg) + 1)),
                y=train_loss_without_reg,
                mode="lines",
                name="无正则化",
                line=dict(color="red")
            ),
            row=1, col=1
        )

        # Validation loss curves (legend suppressed to avoid duplicates).
        fig.add_trace(
            go.Scatter(
                x=list(range(1, len(val_loss_with_reg) + 1)),
                y=val_loss_with_reg,
                mode="lines",
                name="有正则化",
                line=dict(color="blue"),
                showlegend=False
            ),
            row=1, col=2
        )

        fig.add_trace(
            go.Scatter(
                x=list(range(1, len(val_loss_without_reg) + 1)),
                y=val_loss_without_reg,
                mode="lines",
                name="无正则化",
                line=dict(color="red"),
                showlegend=False
            ),
            row=1, col=2
        )

        # Validation accuracy curves.
        fig.add_trace(
            go.Scatter(
                x=list(range(1, len(val_acc_with_reg) + 1)),
                y=val_acc_with_reg,
                mode="lines",
                name="有正则化",
                line=dict(color="blue"),
                showlegend=False
            ),
            row=2, col=1
        )

        fig.add_trace(
            go.Scatter(
                x=list(range(1, len(val_acc_without_reg) + 1)),
                y=val_acc_without_reg,
                mode="lines",
                name="无正则化",
                line=dict(color="red"),
                showlegend=False
            ),
            row=2, col=1
        )

        # Weight-distribution histograms — meaningful only for the techniques
        # that directly shrink weights (L1 / L2 / weight decay).
        if regularization_type in ["L1正则化", "L2正则化", "权重衰减"]:
            weights_with_reg = []
            weights_without_reg = []

            for param in model_with_reg.parameters():
                if param.dim() > 1:  # weight matrices only; skip bias vectors
                    weights_with_reg.extend(param.data.flatten().numpy())

            for param in model_without_reg.parameters():
                if param.dim() > 1:
                    weights_without_reg.extend(param.data.flatten().numpy())

            fig.add_trace(
                go.Histogram(
                    x=weights_with_reg,
                    name="有正则化",
                    opacity=0.5,
                    marker_color="blue"
                ),
                row=2, col=2
            )

            fig.add_trace(
                go.Histogram(
                    x=weights_without_reg,
                    name="无正则化",
                    opacity=0.5,
                    marker_color="red"
                ),
                row=2, col=2
            )

        fig.update_layout(height=800, title_text=f"{regularization_type}效果比较")
        st.plotly_chart(fig)

        # Final validation accuracy side by side.
        col1, col2 = st.columns(2)
        col1.metric("有正则化 - 最终验证准确率", f"{val_acc_with_reg[-1] * 100:.2f}%")
        col2.metric("无正则化 - 最终验证准确率", f"{val_acc_without_reg[-1] * 100:.2f}%")

    # Display-only PyTorch snippet matching the selected technique, rendered
    # with the current slider values interpolated via f-strings.
    st.subheader("PyTorch正则化使用示例")

    if regularization_type == "L1正则化":
        code_example = f"""
import torch
import torch.nn as nn
import torch.optim as optim

# 定义模型
model = nn.Sequential(
    nn.Linear(20, {hidden_units}),
    nn.ReLU(),
    nn.Linear({hidden_units}, 2)
)

# 定义优化器
optimizer = optim.SGD(model.parameters(), lr={learning_rate})

# 自定义L1正则化损失函数
def l1_regularization(model, strength):
    l1_loss = 0
    for param in model.parameters():
        l1_loss += torch.norm(param, 1)
    return strength * l1_loss

# 训练循环
for epoch in range({epochs}):
    # 前向传播
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # 添加L1正则化
    loss += l1_regularization(model, {reg_strength})

    # 反向传播和优化
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
        """
    elif regularization_type == "L2正则化":
        code_example = f"""
import torch
import torch.nn as nn
import torch.optim as optim

# 定义模型
model = nn.Sequential(
    nn.Linear(20, {hidden_units}),
    nn.ReLU(),
    nn.Linear({hidden_units}, 2)
)

# 定义优化器（使用weight_decay参数添加L2正则化）
optimizer = optim.SGD(model.parameters(), lr={learning_rate}, weight_decay={reg_strength})

# 训练循环
for epoch in range({epochs}):
    # 前向传播
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # 反向传播和优化
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
        """
    elif regularization_type == "Dropout":
        code_example = f"""
import torch
import torch.nn as nn
import torch.optim as optim

# 定义模型（包含Dropout层）
model = nn.Sequential(
    nn.Linear(20, {hidden_units}),
    nn.ReLU(),
    nn.Dropout({dropout_rate}),  # Dropout层
    nn.Linear({hidden_units}, 2)
)

# 定义优化器
optimizer = optim.SGD(model.parameters(), lr={learning_rate})

# 训练循环
for epoch in range({epochs}):
    # 训练时启用Dropout
    model.train()

    # 前向传播
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    # 反向传播和优化
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # 验证时禁用Dropout
    model.eval()
    with torch.no_grad():
        val_outputs = model(X_val)
        val_loss = criterion(val_outputs, y_val)
        """
    elif regularization_type == "早停":
        code_example = f"""
import torch
import torch.nn as nn
import torch.optim as optim

# 定义模型
model = nn.Sequential(
    nn.Linear(20, {hidden_units}),
    nn.ReLU(),
    nn.Linear({hidden_units}, 2)
)

# 定义优化器
optimizer = optim.SGD(model.parameters(), lr={learning_rate})

# 早停参数
best_val_loss = float('inf')
patience = {patience}
epochs_no_improve = 0

# 训练循环
for epoch in range({epochs}):
    # 训练步骤
    model.train()
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # 验证步骤
    model.eval()
    with torch.no_grad():
        val_outputs = model(X_val)
        val_loss = criterion(val_outputs, y_val)

    # 早停检查
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        epochs_no_improve = 0
    else:
        epochs_no_improve += 1

    if epochs_no_improve >= patience:
        print(f"早停在 epoch {{epoch+1}}")
        break
        """
    elif regularization_type == "批量归一化":
        code_example = f"""
import torch
import torch.nn as nn
import torch.optim as optim

# 定义模型（包含批量归一化层）
model = nn.Sequential(
    nn.Linear(20, {hidden_units}),
    nn.BatchNorm1d({hidden_units}, momentum={momentum}),  # 批量归一化层
    nn.ReLU(),
    nn.Linear({hidden_units}, 2)
)

# 定义优化器
optimizer = optim.SGD(model.parameters(), lr={learning_rate})

# 训练循环
for epoch in range({epochs}):
    # 训练步骤
    model.train()
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
        """
    elif regularization_type == "权重衰减":
        code_example = f"""
import torch
import torch.nn as nn
import torch.optim as optim

# 定义模型
model = nn.Sequential(
    nn.Linear(20, {hidden_units}),
    nn.ReLU(),
    nn.Linear({hidden_units}, 2)
)

# 定义优化器（使用weight_decay参数添加权重衰减）
optimizer = optim.SGD(model.parameters(), lr={learning_rate}, weight_decay={reg_strength})

# 训练循环
for epoch in range({epochs}):
    # 训练步骤
    outputs = model(X_train)
    loss = criterion(outputs, y_train)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
        """

    st.code(code_example, language="python")

    # Closing guidance on choosing a technique.
    st.subheader("正则化技术选择指南")

    st.markdown("""
    1. **L1正则化**：适用于特征选择，高维数据，可以产生稀疏权重矩阵
    2. **L2正则化**：适用于大多数回归和分类问题，平滑权重分布，防止过拟合
    3. **Dropout**：适用于深度神经网络，防止神经元共适应，提高泛化能力
    4. **早停**：适用于各种模型训练，简单有效，无需修改模型结构
    5. **批量归一化**：适用于深度神经网络，加速训练，提高稳定性
    6. **权重衰减**：适用于各种神经网络，实现简单，等价于L2正则化

    **建议**：对于大多数深度学习任务，可以组合使用多种正则化技术，如L2正则化+Dropout+批量归一化。
    """)


# Allow running this page directly as a standalone script (outside the
# multi-page app's router).
if __name__ == "__main__":
    show()