import torch
from transformers import AutoTokenizer, BertForQuestionAnswering

# Load the tokenizer and model (BERT fine-tuned on SQuAD2 for extractive QA).
MODEL_NAME = "deepset/bert-base-cased-squad2"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = BertForQuestionAnswering.from_pretrained(MODEL_NAME)

# Question and the context passage to extract the answer from.
question = "Who was Jim Henson?"
context = "Jim Henson was a nice puppet"

# Tokenize the (question, context) pair into model-ready tensors.
inputs = tokenizer(question, context, return_tensors="pt")

# Inference: no_grad skips autograd bookkeeping since we never backpropagate.
with torch.no_grad():
    outputs = model(**inputs)

# Most likely start/end token positions of the answer span.
answer_start_index = outputs.start_logits.argmax()
answer_end_index = outputs.end_logits.argmax()

# Decode the predicted span of input tokens back into text.
tokens = inputs.input_ids[0, answer_start_index: answer_end_index + 1]
predicted_answer = tokenizer.decode(tokens, skip_special_tokens=True)
print(predicted_answer)  # expected output: 'a nice puppet'

# Loss computation example (this is what happens during training).
# The target indices point at the gold answer span in the tokenized input.
target_start_index = torch.tensor([14])
target_end_index = torch.tensor([15])
# no_grad here too: the loss is only printed, never backpropagated, so
# building a computation graph for this forward pass would waste memory.
with torch.no_grad():
    outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index)
loss = outputs.loss
print(round(loss.item(), 2))  # example loss: 7.41
