# 导入torch和torch.nn模块
import torch
import torch.nn as nn

# Hyperparameters: step size for AdamW and the maximum number of
# training iterations before giving up on convergence.
learning_rate = 0.01
max_iter = 100000
# Training data sampled from the line y = 2x + 1.
x_train = [1, 2, 3, 4, 5]
y_train = [3, 5, 7, 9, 11]

# Mean-squared-error loss (use the imported `nn` alias consistently).
loss_func = nn.MSELoss()
# Model: a single linear layer mapping one input feature to one output.
model = nn.Sequential(
    nn.Linear(1, 1),
)
# AdamW optimizer over the model's parameters.
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)

# 迭代max_iter次
# Training loop: full-batch gradient descent until the loss converges
# or max_iter iterations have elapsed.
# The input/target tensors are loop-invariant, so build them once,
# shaped (5, 1) to match the Linear(1, 1) layer.
xb = torch.tensor(x_train, dtype=torch.float).view(-1, 1)
yb = torch.tensor(y_train, dtype=torch.float).view(-1, 1)
for step in range(max_iter):  # `step`, not `iter` — avoid shadowing the builtin
    # Forward pass: model predictions for the whole batch.
    pred = model(xb)
    # Mean-squared error between predictions and the ground truth.
    loss = loss_func(pred, yb)
    print(loss.item())
    # Reset gradients; set_to_none=True drops the grad tensors instead of
    # zeroing them in place, which is slightly cheaper.
    optimizer.zero_grad(set_to_none=True)
    # Backpropagate to populate parameter gradients.
    loss.backward()
    # Apply the AdamW parameter update.
    optimizer.step()
    # Stop once the loss drops below 1e-7 and report the learned
    # parameters (they should approach weight=2, bias=1 for y = 2x + 1).
    if loss.item() < 1e-7:
        print("Model parameters: ", model[0].weight.item(), model[0].bias.item())
        break