from transformers import BertTokenizer
import transformers
import  huggingface_hub


# Print library versions (huggingface_hub first, then transformers)
# to aid environment debugging.
for module in (huggingface_hub, transformers):
    print(module.__version__)


# Load the BERT tokenizer from a local checkpoint directory.
# NOTE(review): hard-coded Windows path — only valid on this machine.
tokenizer = BertTokenizer.from_pretrained('D:/wk/bert-base-uncased')

# Example batch of sentences to encode.
sentences = ["Hello, my dog is cute", "BERT is a powerful model"]

# Batch-encode the sentences: pad to the longest item in the batch,
# truncate to the model's maximum length, and return PyTorch tensors.
inputs = tokenizer(
    sentences,
    return_tensors="pt",
    padding=True,
    truncation=True,
)
