import torch
from transformers import BertTokenizer, BertModel

# Load the pretrained BERT tokenizer (downloads/caches weights on first use).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Load the pretrained BERT encoder model.
model = BertModel.from_pretrained('bert-base-uncased')
# Switch to inference mode: disables dropout and other train-time behavior.
model.eval()
# Example input text.
text = "Hello, world!123123123"

# Tokenize into a dict of PyTorch tensors (input_ids, attention_mask, ...).
# truncation=True caps the sequence at the model's maximum length;
# padding=True is a no-op for a single sentence but matters for batches.
inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True)

# Print the encoded token IDs and masks.
print(inputs)

# Run the forward pass without tracking gradients (saves memory and compute).
with torch.no_grad():
    outputs = model(**inputs)

# Per-token embeddings and the pooled [CLS]-based sentence representation.
last_hidden_state = outputs.last_hidden_state
pooler_output = outputs.pooler_output

# Print the output shapes.
print("Last hidden state shape:", last_hidden_state.shape)  # (batch_size, sequence_length, hidden_size)
print("Pooler output shape:", pooler_output.shape)          # (batch_size, hidden_size)

# from transformers import BertTokenizer, BertForQuestionAnswering
# import torch

# # Initialize the tokenizer and model
# tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
# model = BertForQuestionAnswering.from_pretrained("bert-base-uncased")

# # Prepare the input data
# context = "The capital of France is Paris."
# question = "What is the capital of France?"

# # Encode the inputs
# inputs = tokenizer.encode_plus(question, context, return_tensors="pt")

# # Get the input IDs and attention mask
# input_ids = inputs["input_ids"]
# attention_mask = inputs["attention_mask"]

# # Model inference
# with torch.no_grad():
#     outputs = model(input_ids, attention_mask=attention_mask)

# # Get the start and end position scores
# start_scores = outputs.start_logits
# end_scores = outputs.end_logits

# # Find the start and end positions of the answer
# start_index = torch.argmax(start_scores)
# end_index = torch.argmax(end_scores)

# # Decode the answer
# answer_tokens = input_ids[0][start_index:end_index + 1]
# answer = tokenizer.decode(answer_tokens)

# print(f"Answer: {answer}")  # print the answer
