from transformers import BertForQuestionAnswering, BertTokenizer
import torch

# Load a BERT model fine-tuned for extractive QA on SQuAD, plus its
# matching tokenizer (the tokenizer must come from the same checkpoint
# so the vocabulary and special tokens line up).
model = BertForQuestionAnswering.from_pretrained(
    "bert-large-uncased-whole-word-masking-finetuned-squad"
)

tokenizer = BertTokenizer.from_pretrained(
    "bert-large-uncased-whole-word-masking-finetuned-squad"
)

# Inference only: switch off dropout.
model.eval()

# Prepare the input
question = "What is the immune system?"
paragraph = "The immune system is a system of many biological structures and processes within an organism that protects against disease. To function properly, an immune system must detect a wide variety of agents, known as pathogens, from viruses to parasitic worms, and distinguish them from the organism's own healthy tissue."

# Let the tokenizer build "[CLS] question [SEP] paragraph [SEP]" and the
# matching token_type_ids (0 = question, 1 = paragraph) instead of
# concatenating special tokens by hand — the original manual version was
# missing the space before the first "[SEP]", which is exactly the kind
# of mistake this path avoids.
encoding = tokenizer(question, paragraph, return_tensors="pt")
input_ids = encoding["input_ids"]
segment_ids = encoding["token_type_ids"]

# Token strings for the (single) sequence in the batch, used to render
# the predicted answer span below.
tokens = tokenizer.convert_ids_to_tokens(input_ids[0])

# Get the answer: run the model without building the autograd graph.
with torch.no_grad():
    start_scores, end_scores = model(
        input_ids, token_type_ids=segment_ids, return_dict=False
    )

# Answer span = argmax of start logits .. argmax of end logits.
# Flat argmax over the whole tensor is valid here because batch size is 1.
start_index = torch.argmax(start_scores)
end_index = torch.argmax(end_scores)

# convert_tokens_to_string merges WordPiece pieces ("im", "##mune" ->
# "immune") instead of printing raw "##" fragments as a plain join would.
print(tokenizer.convert_tokens_to_string(tokens[start_index : end_index + 1]))
