# Demo: two chained linear layers trained with a combined loss.
# A single backward pass on (loss1 + loss2) produces the same gradients
# as backpropagating each loss separately — the gradient of a sum
# equals the sum of the gradients.
x = torch.full((2, 3), 0.2)      # input batch: 2 samples, 3 features
net1 = nn.Linear(3, 3)
net2 = nn.Linear(3, 3)
tgt1 = torch.full((2, 3), 0.5)   # target for the first stage's output
tgt2 = torch.ones(2, 3)          # target for the second stage's output
loss_fun = torch.nn.MSELoss()

opt1 = torch.optim.Adam(net1.parameters(), 0.002)
opt2 = torch.optim.Adam(net2.parameters(), 0.002)

# Forward pass: the first stage's prediction feeds the second stage,
# so loss2 also backpropagates through net1.
pred1 = net1(x)
loss1 = loss_fun(pred1, tgt1)
pred2 = net2(pred1)
loss2 = loss_fun(pred2, tgt2)
tol_loss = loss1 + loss2

# Clear stale gradients on both optimizers, then backprop the combined
# loss once and step each optimizer.
opt1.zero_grad()
opt2.zero_grad()
tol_loss.backward()
opt1.step()
opt2.step()
