import streamlit as st
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import plotly.graph_objects as go
from plotly.subplots import make_subplots


def _parse_milestones(text):
    """Parse a comma-separated list of milestone epochs.

    Args:
        text: Raw user input such as ``"30,60,90"``.

    Returns:
        A sorted list of ints (PyTorch's MultiStepLR requires the
        milestones to be increasing), or None when the input is empty
        or contains anything that is not an integer.
    """
    try:
        values = [int(part.strip()) for part in text.split(",") if part.strip()]
    except ValueError:
        return None
    return sorted(values) or None


def _example_code(initial_lr, epochs, comment, ctor_line, plateau=False):
    """Build the PyTorch usage snippet rendered via ``st.code``.

    All seven schedulers share the same boilerplate; only the constructor
    line, its comment, and (for ReduceLROnPlateau) the ``step`` call differ.

    Args:
        initial_lr: Learning rate interpolated into the optimizer line.
        epochs: Epoch count interpolated into the training loop.
        comment: Comment line placed above the scheduler constructor
            (kept in Chinese to match the rest of the UI).
        ctor_line: Full ``lr_scheduler.Xxx(...)`` constructor expression.
        plateau: When True, emit the metric-driven
            ``scheduler.step(val_loss)`` variant used by ReduceLROnPlateau.

    Returns:
        The snippet as a plain string.
    """
    if plateau:
        step_block = (
            "    # 计算验证损失或其他指标\n"
            "    val_loss = ...\n"
            "\n"
            "    # 更新学习率（基于验证损失）\n"
            "    scheduler.step(val_loss)"
        )
    else:
        step_block = (
            "    # 更新学习率\n"
            "    scheduler.step()"
        )
    return f"""
import torch.optim as optim
from torch.optim import lr_scheduler

# 定义模型和优化器
model = ...
optimizer = optim.SGD(model.parameters(), lr={initial_lr})

# {comment}
scheduler = {ctor_line}

# 训练循环
for epoch in range({epochs}):
    # 训练步骤
    ...

{step_block}
"""


def show():
    """Render the learning-rate scheduler tutorial page.

    Sections: a comparison table of PyTorch LR schedulers, interactive
    parameter widgets with a Plotly visualization of the resulting LR
    curve, a generated PyTorch usage snippet, and a short selection
    guide. Must run inside an active Streamlit session.
    """
    st.markdown('<h2 class="sub-header">学习率调度器</h2>', unsafe_allow_html=True)

    st.markdown("""
    <div class="content">
        <p>学习率调度器用于在训练过程中动态调整学习率，以提高模型性能并加速收敛。</p>
        <p>适当的学习率调度可以帮助模型跳出局部最小值，达到更好的全局最优解。</p>
    </div>
    """, unsafe_allow_html=True)

    # Comparison table of the supported schedulers.
    st.subheader("学习率调度器对比")

    st.markdown("""
    | 调度器 | 核心思想 | 优点 | 缺点 | 适用场景 |
    |--------|---------|------|------|---------|
    | StepLR | 每隔固定步长降低学习率 | 简单易用 | 需要手动设置步长和衰减率 | 基础学习率调整 |
    | MultiStepLR | 在指定里程碑降低学习率 | 更灵活的时间点控制 | 需要预先知道训练进度 | 复杂训练计划 |
    | ExponentialLR | 指数衰减学习率 | 平滑衰减，无需设置步长 | 衰减可能过快或过慢 | 需要平滑衰减的场景 |
    | CosineAnnealingLR | 余弦退火调整学习率 | 有助于跳出局部最小值 | 周期性变化可能不稳定 | 需要跳出局部最优的场景 |
    | ReduceLROnPlateau | 在性能停滞时降低学习率 | 自适应调整，无需预设时间表 | 需要验证集监控性能 | 不确定最佳学习率调整时机的场景 |
    | LambdaLR | 基于自定义函数调整学习率 | 灵活性高 | 需要用户定义函数 | 需要自定义调整规则的场景 |
    | LinearLR | 线性调整学习率 | 缓解学习率突变 | 需要精细调整参数 | 训练开始时的热身阶段 |
    """)

    # Interactive visualization of the LR curve.
    st.subheader("学习率调度器可视化")

    # Scheduler choice drives which parameter widgets appear below.
    scheduler_type = st.selectbox(
        "选择学习率调度器",
        ["StepLR", "MultiStepLR", "ExponentialLR", "CosineAnnealingLR",
         "ReduceLROnPlateau", "LambdaLR", "LinearLR"]
    )

    # Parameters shared by every scheduler.
    col1, col2 = st.columns(2)
    with col1:
        initial_lr = st.slider("初始学习率", 0.0001, 0.1, 0.01, 0.0001, format="%.4f")
    with col2:
        epochs = st.slider("训练轮数", 10, 200, 100, 10)

    # Scheduler-specific parameters.
    if scheduler_type == "StepLR":
        step_size = st.slider("步长", 1, 50, 30, 1)
        gamma = st.slider("衰减率", 0.1, 0.9, 0.1, 0.1)
    elif scheduler_type == "MultiStepLR":
        milestones = st.text_input("里程碑 (逗号分隔)", "30,60,90")
        gamma = st.slider("衰减率", 0.1, 0.9, 0.1, 0.1)
    elif scheduler_type == "ExponentialLR":
        gamma = st.slider("衰减率", 0.8, 0.99, 0.95, 0.01)
    elif scheduler_type == "CosineAnnealingLR":
        t_max = st.slider("周期长度", 10, 100, 50, 10)
        eta_min = st.slider("最小学习率", 0.00001, 0.001, 0.0001, 0.00001, format="%.5f")
    elif scheduler_type == "ReduceLROnPlateau":
        factor = st.slider("衰减因子", 0.1, 0.9, 0.1, 0.1)
        patience = st.slider("耐心值", 1, 20, 5, 1)
        mode = st.selectbox("模式", ["min", "max"])
    elif scheduler_type == "LambdaLR":
        lambda_type = st.selectbox("Lambda函数类型", ["指数衰减", "线性衰减", "阶梯衰减"])
        if lambda_type == "指数衰减":
            lambda_gamma = st.slider("衰减率", 0.8, 0.99, 0.95, 0.01)
        elif lambda_type == "线性衰减":
            start_factor = st.slider("起始因子", 0.1, 1.0, 0.1, 0.1)
            end_factor = st.slider("结束因子", 0.1, 1.0, 0.1, 0.1)
        else:  # step-wise decay
            step_size = st.slider("步长", 1, 50, 30, 1)
            gamma = st.slider("衰减率", 0.1, 0.9, 0.1, 0.1)
    elif scheduler_type == "LinearLR":
        start_factor = st.slider("起始因子", 0.1, 1.0, 0.1, 0.1)
        end_factor = st.slider("结束因子", 0.1, 1.0, 1.0, 0.1)
        total_iters = st.slider("总迭代次数", 1, 100, 5, 1)

    # Simulate and plot the learning-rate trajectory.
    if st.button("可视化学习率变化"):
        # Dummy model/optimizer: the scheduler only needs a real optimizer,
        # the model is never trained.
        model = nn.Linear(10, 2)
        optimizer = optim.SGD(model.parameters(), lr=initial_lr)

        # Build the selected scheduler.
        if scheduler_type == "StepLR":
            scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)
            title = f"StepLR (step_size={step_size}, gamma={gamma})"
        elif scheduler_type == "MultiStepLR":
            # Validate user input up front: malformed text would otherwise
            # crash the app, and unsorted milestones make PyTorch raise.
            milestone_list = _parse_milestones(milestones)
            if milestone_list is None:
                st.error("里程碑格式无效，请输入逗号分隔的整数，例如 30,60,90")
                st.stop()
            scheduler = lr_scheduler.MultiStepLR(optimizer, milestones=milestone_list, gamma=gamma)
            title = f"MultiStepLR (milestones={milestone_list}, gamma={gamma})"
        elif scheduler_type == "ExponentialLR":
            scheduler = lr_scheduler.ExponentialLR(optimizer, gamma=gamma)
            title = f"ExponentialLR (gamma={gamma})"
        elif scheduler_type == "CosineAnnealingLR":
            scheduler = lr_scheduler.CosineAnnealingLR(optimizer, T_max=t_max, eta_min=eta_min)
            title = f"CosineAnnealingLR (T_max={t_max}, eta_min={eta_min})"
        elif scheduler_type == "ReduceLROnPlateau":
            # ReduceLROnPlateau needs a monitored metric; it is simulated
            # inside the epoch loop below.
            scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, mode=mode, factor=factor, patience=patience)
            title = f"ReduceLROnPlateau (mode={mode}, factor={factor}, patience={patience})"
        elif scheduler_type == "LambdaLR":
            if lambda_type == "指数衰减":
                lr_lambda = lambda epoch: lambda_gamma ** epoch
                scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
                title = f"LambdaLR (指数衰减, gamma={lambda_gamma})"
            elif lambda_type == "线性衰减":
                lr_lambda = lambda epoch: start_factor + (end_factor - start_factor) * epoch / epochs
                scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
                title = f"LambdaLR (线性衰减, start_factor={start_factor}, end_factor={end_factor})"
            else:  # step-wise decay
                lr_lambda = lambda epoch: gamma ** (epoch // step_size)
                scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
                title = f"LambdaLR (阶梯衰减, step_size={step_size}, gamma={gamma})"
        elif scheduler_type == "LinearLR":
            scheduler = lr_scheduler.LinearLR(optimizer, start_factor=start_factor, end_factor=end_factor,
                                              total_iters=total_iters)
            title = f"LinearLR (start_factor={start_factor}, end_factor={end_factor}, total_iters={total_iters})"

        # Record the learning rate at the start of each simulated epoch.
        lr_history = []

        for epoch in range(epochs):
            current_lr = optimizer.param_groups[0]['lr']
            lr_history.append(current_lr)

            if scheduler_type == "ReduceLROnPlateau":
                # Synthetic metric: improves for the first half of training,
                # then worsens — so the scheduler eventually triggers.
                if epoch < epochs // 2:
                    metric = 1.0 - epoch / (epochs // 2)
                else:
                    metric = (epoch - epochs // 2) / (epochs // 2)
                scheduler.step(metric)
            else:
                scheduler.step()

        # Plot the learning-rate curve; switch to a log axis when the LR
        # spans more than one order of magnitude.
        fig = go.Figure()
        fig.add_trace(go.Scatter(
            x=list(range(1, epochs + 1)),
            y=lr_history,
            mode='lines+markers',
            name='学习率'
        ))

        min_lr = min(lr_history)
        fig.update_layout(
            title=title,
            xaxis_title='Epoch',
            yaxis_title='Learning Rate',
            # Guard against min_lr == 0 (e.g. a schedule that decays to 0).
            yaxis_type='log' if min_lr > 0 and max(lr_history) / min_lr > 10 else 'linear'
        )

        st.plotly_chart(fig)

        # Summary metrics for the simulated run.
        col1, col2, col3 = st.columns(3)
        col1.metric("初始学习率", f"{initial_lr:.6f}")
        col2.metric("最终学习率", f"{lr_history[-1]:.6f}")
        col3.metric("学习率变化倍数", f"{lr_history[-1] / initial_lr:.3f}")

    # Generated PyTorch usage snippet for the selected scheduler.
    st.subheader("PyTorch调度器使用示例")

    if scheduler_type == "StepLR":
        code_example = _example_code(
            initial_lr, epochs, "创建StepLR调度器",
            f"lr_scheduler.StepLR(optimizer, step_size={step_size}, gamma={gamma})")
    elif scheduler_type == "MultiStepLR":
        code_example = _example_code(
            initial_lr, epochs, "创建MultiStepLR调度器",
            f"lr_scheduler.MultiStepLR(optimizer, milestones=[{milestones}], gamma={gamma})")
    elif scheduler_type == "ExponentialLR":
        code_example = _example_code(
            initial_lr, epochs, "创建ExponentialLR调度器",
            f"lr_scheduler.ExponentialLR(optimizer, gamma={gamma})")
    elif scheduler_type == "CosineAnnealingLR":
        code_example = _example_code(
            initial_lr, epochs, "创建CosineAnnealingLR调度器",
            f"lr_scheduler.CosineAnnealingLR(optimizer, T_max={t_max}, eta_min={eta_min})")
    elif scheduler_type == "ReduceLROnPlateau":
        code_example = _example_code(
            initial_lr, epochs, "创建ReduceLROnPlateau调度器",
            f"lr_scheduler.ReduceLROnPlateau(optimizer, mode='{mode}', factor={factor}, patience={patience})",
            plateau=True)
    elif scheduler_type == "LambdaLR":
        if lambda_type == "指数衰减":
            code_example = _example_code(
                initial_lr, epochs, "创建LambdaLR调度器（指数衰减）",
                f"lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda epoch: {lambda_gamma} ** epoch)")
        elif lambda_type == "线性衰减":
            code_example = _example_code(
                initial_lr, epochs, "创建LambdaLR调度器（线性衰减）",
                f"lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda epoch: {start_factor} + ({end_factor}-{start_factor}) * epoch / {epochs})")
        else:  # step-wise decay
            code_example = _example_code(
                initial_lr, epochs, "创建LambdaLR调度器（阶梯衰减）",
                f"lr_scheduler.LambdaLR(optimizer, lr_lambda=lambda epoch: {gamma} ** (epoch // {step_size}))")
    elif scheduler_type == "LinearLR":
        code_example = _example_code(
            initial_lr, epochs, "创建LinearLR调度器",
            f"lr_scheduler.LinearLR(optimizer, start_factor={start_factor}, end_factor={end_factor}, total_iters={total_iters})")

    st.code(code_example, language="python")

    # Short selection guide.
    st.subheader("调度器选择指南")

    st.markdown("""
    1. **StepLR**：适用于学习率需要按照固定步长减少的情况，实现简单
    2. **MultiStepLR**：比StepLR更灵活，可以自定义多个下降点
    3. **ExponentialLR**：适用于需要平滑减少学习率的情况，衰减过程连续
    4. **CosineAnnealingLR**：可以避免局部最小值，周期性的调整有助于探索更多的参数空间
    5. **ReduceLROnPlateau**：基于模型的实际表现动态调整学习率，适用于不确定何时调整学习率的情况
    6. **LambdaLR**：灵活性高，可以根据自定义规则调整学习率
    7. **LinearLR**：有效缓解学习率突变，提高训练稳定性，常用于训练开始时的热身阶段
    """)


# Render the page when this module is executed directly (rather than
# being imported as a page of a larger Streamlit app).
if __name__ == "__main__":
    show()