import torch

# Simulate an episode with 3 steps.
T = 3
log_probs = [
    torch.tensor([[0.1]], requires_grad=True),   # [1, 1]
    torch.tensor([[0.2]], requires_grad=True),   # [1, 1]
    torch.tensor([[0.3]], requires_grad=True),   # [1, 1]
]
returns = [10.0, 5.0, 2.0]  # G_t values (returns-to-go per step)

# Method 1: collect per-step losses, then cat + sum.
policy_losses = []
for log_prob, G_val in zip(log_probs, returns):
    policy_losses.append(-log_prob * G_val)

loss1 = torch.cat(policy_losses, dim=0).sum()
print("loss1:", loss1)          # tensor(-2.6000, grad_fn=<SumBackward0>)
print("loss1.shape:", loss1.shape)  # torch.Size([])

# Method 2: vectorized form (equivalent, more concise).
stacked = torch.stack([lp.squeeze() for lp in log_probs])  # [3]
returns_tensor = torch.tensor(returns)                     # [3]
loss2 = -(stacked * returns_tensor).sum()
print("loss2:", loss2)          # tensor(-2.6000, grad_fn=<NegBackward0>)

# Method 3: running sum (avoids cat/stack).
# NOTE: start from a plain Python float, NOT
# torch.tensor(0.0, requires_grad=True) — that would create an extra leaf
# tensor that needlessly tracks its own gradient. Adding a tensor to 0.0
# produces the same graph/value without the spurious leaf.
loss3 = 0.0
for log_prob, G_val in zip(log_probs, returns):
    loss3 = loss3 + (-log_prob * G_val).squeeze()

print("loss3:", loss3)          # tensor(-2.6000, grad_fn=<AddBackward0>)

# Verify that all three formulations produce the same gradient:
# d(loss)/d(log_probs[0]) = -G_0 = -10.
# retain_graph=True keeps shared graph state alive for the later backwards.
loss1.backward(retain_graph=True)
print("grad from loss1 (first log_prob):", log_probs[0].grad)  # tensor([[-10.]])
log_probs[0].grad.zero_()

loss2.backward(retain_graph=True)
print("grad from loss2 (first log_prob):", log_probs[0].grad)  # tensor([[-10.]])
log_probs[0].grad.zero_()

# Previously missing: method 3 was never verified.
loss3.backward()
print("grad from loss3 (first log_prob):", log_probs[0].grad)  # tensor([[-10.]])