import torch.nn.functional as F

# 训练损失，只训练注意力权重
def model_loss_train_attn_only(disp_ests, disp_gt, mask):
    """Training loss when only the attention weights are being trained.

    A single predicted disparity map (the one supervising the attention
    weights) is compared to ground truth with smooth-L1 on masked pixels.

    Args:
        disp_ests: list with one disparity prediction, shape (B, H, W).
        disp_gt: ground-truth disparity, shape (B, H, W).
        mask: boolean mask of valid ground-truth pixels.

    Returns:
        Scalar tensor: weighted smooth-L1 loss.
    """
    weights = [1.0]
    return sum(
        w * F.smooth_l1_loss(est[mask], disp_gt[mask], reduction='mean')
        for est, w in zip(disp_ests, weights)
    )

# 训练损失，冻结注意力权重
def model_loss_train_freeze_attn(disp_ests, disp_gt, mask):
    """Training loss when the attention weights are frozen.

    The three regressed disparity maps are supervised with smooth-L1 on
    masked pixels, weighted 0.5 / 0.7 / 1.0 (coarse to fine).

    Args:
        disp_ests: list of three disparity predictions, each (B, H, W).
        disp_gt: ground-truth disparity, shape (B, H, W).
        mask: boolean mask of valid ground-truth pixels.

    Returns:
        Scalar tensor: weighted sum of the per-map smooth-L1 losses.
    """
    weights = [0.5, 0.7, 1.0]
    return sum(
        w * F.smooth_l1_loss(est[mask], disp_gt[mask], reduction='mean')
        for est, w in zip(disp_ests, weights)
    )

# 训练损失，默认运行，训练所有参数
def model_loss_train(disp_ests, disp_gt, mask):
    """Default training loss: all parameters are trained.

    Args:
        disp_ests: list of four disparity predictions, each (B, H, W) —
            [disparity supervising the attention weights, regressed
            disparity 0, regressed disparity 1, regressed disparity 2].
        disp_gt: ground-truth disparity, shape (B, H, W).
        mask: boolean mask of valid ground-truth pixels.

    Returns:
        Scalar tensor: weighted sum of per-map smooth-L1 losses, with
        weights 0.5 (attention), 0.5, 0.7, 1.0 (regressed 0/1/2).
    """
    weights = [0.5, 0.5, 0.7, 1.0]
    return sum(
        w * F.smooth_l1_loss(est[mask], disp_gt[mask], reduction='mean')
        for est, w in zip(disp_ests, weights)
    )

# 验证损失
def model_loss_test(disp_ests, disp_gt, mask):
    """Validation loss.

    Uses plain L1 (mean absolute disparity error on masked pixels) rather
    than smooth-L1, so the value is directly interpretable as end-point
    error. (A previous comment here said "smooth L1" — the code has always
    called F.l1_loss.)

    Args:
        disp_ests: list with one disparity prediction (regressed
            disparity 2), shape (B, H, W).
        disp_gt: ground-truth disparity, shape (B, H, W).
        mask: boolean mask of valid ground-truth pixels.

    Returns:
        Scalar tensor: weighted L1 loss.
    """
    weights = [1.0]
    return sum(
        w * F.l1_loss(est[mask], disp_gt[mask], reduction='mean')
        for est, w in zip(disp_ests, weights)
    )
