import torch
from torch.nn import MSELoss, BCEWithLogitsLoss

def train_one_epoch_as(model, loader, optimizer, device):
    """Run one full training epoch for a graph-completion autoencoder.

    Args:
        model: Autoencoder that, given a masked graph, returns a pair
            ``(node_recon, edge_logits_fn)`` where ``node_recon`` are the
            reconstructed node features and ``edge_logits_fn`` maps an edge
            index tensor to per-edge existence logits.
        loader: Iterable yielding ``(masked_data, full_data)`` batches.
            ``masked_data`` must expose a ``masked_info`` dict with keys
            ``'masked_node_indices'``, ``'original_node_features'`` and
            ``'masked_edge_index'``; ``full_data`` must expose ``.x``.
        optimizer: Optimizer over ``model``'s parameters.
        device: Target device, e.g. ``'cpu'`` or ``'cuda'``.

    Returns:
        float: Average total loss over the epoch (0.0 if the loader is empty).
    """
    model.train()
    mse = MSELoss()
    bce = BCEWithLogitsLoss()
    total_loss = 0.0
    num_batches = 0

    for masked_data, full_data in loader:
        # 1) Move the batch to the target device.
        masked_data = masked_data.to(device)
        full_data = full_data.to(device)

        # 2) Forward + backward pass.
        optimizer.zero_grad()
        node_recon, edge_logits_fn = model(masked_data)

        # -- LOSS1: reconstruction MSE over ALL node features --
        loss1 = mse(node_recon, full_data.x)

        # -- LOSS3: reconstruction MSE over the MASKED nodes only --
        # NOTE(review): assumes masked_info tensors already live on `device`
        # (or that indexing across devices is avoided upstream) — confirm.
        m_nodes = masked_data.masked_info['masked_node_indices']
        orig_x = masked_data.masked_info['original_node_features']
        loss3 = mse(node_recon[m_nodes], orig_x)

        # -- LOSS2: binary classification on the REMOVED edges --
        m_edges = masked_data.masked_info['masked_edge_index']
        logits = edge_logits_fn(m_edges)
        # All supervised edges were masked out of a real graph, so every
        # label is 1 ("edge exists").
        labels = torch.ones_like(logits)
        loss2 = bce(logits, labels)

        # -- Total loss --
        loss = loss1 + loss2 + loss3
        loss.backward()
        optimizer.step()

        total_loss += loss.item()
        num_batches += 1

    # Guard against an empty loader: the original `total_loss / len(loader)`
    # would raise ZeroDivisionError. Counting batches also supports loaders
    # without __len__.
    return total_loss / num_batches if num_batches else 0.0
