import torch
from transformers import BertTokenizer, BertForMaskedLM
 
# Load the BERT tokenizer and masked-language model.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMaskedLM.from_pretrained('bert-base-uncased')
model.eval()  # disable dropout so inference is deterministic

# Prepare the input: a sentence containing a single [MASK] token.
sentence = "I have a [MASK] named Charlie."
input_ids = tokenizer.encode(sentence, return_tensors='pt')

# Run inference without gradient tracking.
with torch.no_grad():
    output = model(input_ids)

# Locate the [MASK] position. The top-k must be taken at that position only;
# taking topk over the full (seq_len, vocab) matrix would yield per-position
# predictions for every token in the sentence, not the mask fill.
mask_index = (input_ids[0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0].item()

# Softmax over the vocabulary at the mask position, then take the top k
# candidate tokens and their probabilities.
k = 5
logits = output[0]  # shape: (1, seq_len, vocab_size)
probs = torch.nn.functional.softmax(logits[0, mask_index], dim=-1)
top_k = torch.topk(probs, k=k)
for i, pred_idx in enumerate(top_k.indices):
    pred_prob = top_k.values[i]
    pred_token = tokenizer.convert_ids_to_tokens([pred_idx.item()])[0]
    # ".2%" formats as a percentage with two decimal places.
    print("Top {} Prediction: '{}', Probability: {:.2%}".format(i + 1, pred_token, pred_prob.item()))