import torch
import torch.nn as nn

# Hyperparameters
lr = 0.1
Epochs = 100

# A minimal linear model: y = w * x (no bias term)
model = nn.Linear(1, 1, bias=False)
# Mean-squared-error loss
loss_fn = nn.MSELoss()
# Adagrad optimizer, with every knob spelled out at its default value
optimizer = torch.optim.Adagrad(
    model.parameters(),
    lr=lr,
    lr_decay=0,                    # learning-rate decay factor
    weight_decay=0,                # L2 regularization (weight decay)
    initial_accumulator_value=0,   # initial value of the squared-gradient accumulator
    eps=1e-10,                     # tiny constant that keeps the denominator non-zero
)
# Toy training data following y = 2x
x_data = torch.tensor([[1.0], [2.0], [3.0], [4.0]])
y_data = torch.tensor([[2.0], [4.0], [6.0], [8.0]])

# 循环训练
for epoch in range(Epochs):
    optimizer.zero_grad()
    y_pred = model(x_data)
    loss = loss_fn(y_pred, y_data)
    loss.backward()
    optimizer.step()
    # Report on the first epoch and every 10th epoch thereafter.
    if epoch == 0 or (epoch + 1) % 10 == 0:
        # Fallback in case the optimizer state is not populated yet, so the
        # print below can never hit an unbound name.
        actual_lr = lr
        for param in model.parameters():
            # Per-parameter optimizer state ('sum' is the running sum of
            # squared gradients that Adagrad divides by).
            state = optimizer.state[param]
            if 'sum' in state:
                # Effective step size, matching PyTorch's Adagrad update:
                # std = sqrt(sum) + eps, step = lr / std  (eps is added
                # OUTSIDE the sqrt). With a single scalar weight this tensor
                # has one element, so .item() is safe.
                actual_lr = (lr / (state['sum'].sqrt() + 1e-10)).item()
        print(f"Epoch: {epoch + 1} Loss:{loss.item():.4f} LR:{actual_lr:.6f}")








