import torch
import torch.nn as nn

# Demo: token-level cross-entropy where padding positions are masked out
# via ignore_index=-100 (the conventional padding label in NLP pipelines).

# Simulated model output logits, shape [batch_size=2, seq_len=4, vocab_size=5].
logits = torch.tensor([
    [[2.0, 0.5, 0.3, 0.1, 0.1],
     [0.1, 1.0, 0.1, 0.1, 0.1],
     [0.2, 0.2, 3.0, 0.1, 0.1],
     [0.0, 0.0, 0.0, 0.0, 0.0]],

    [[0.3, 0.2, 2.0, 0.1, 0.1],
     [0.1, 0.1, 0.1, 1.0, 0.1],
     [0.5, 0.1, 0.1, 0.1, 2.5],
     [0.0, 0.0, 0.0, 0.0, 0.0]]
])

# Ground-truth class indices, shape [batch_size=2, seq_len=4].
# The trailing -100 in each row marks a padding position that must not
# contribute to the loss.
labels = torch.tensor([
    [0, 1, 2, -100],
    [2, 3, 4, -100]
])

# CrossEntropyLoss averages over non-ignored targets only.
criterion = nn.CrossEntropyLoss(ignore_index=-100)

# CrossEntropyLoss expects [N, C] logits and [N] targets, so collapse the
# batch and sequence dimensions together before calling it.
batch_size, seq_len, vocab_size = logits.shape
flat_logits = logits.reshape(batch_size * seq_len, vocab_size)  # [8, 5]
flat_labels = labels.reshape(batch_size * seq_len)              # [8]

loss = criterion(flat_logits, flat_labels)
print(f"loss: {loss.item():.4f}")






