from transformers import BertTokenizer, BertModel
import torch

# Load the pretrained BERT model and its matching tokenizer.
# NOTE: from_pretrained downloads/caches the weights on first use.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')

def get_word_embedding(word):
    """Return a BERT embedding for *word* as a numpy array.

    The input is tokenized (BERT may split it into sub-word pieces and adds
    the special [CLS]/[SEP] tokens), run through the model, and the final
    hidden states are mean-pooled over the token dimension.

    Args:
        word: Input string to embed.

    Returns:
        numpy array of shape (1, hidden_size) — (1, 768) for
        bert-base-uncased.
    """
    # Tokenize and convert to PyTorch tensors.
    inputs = tokenizer(word, return_tensors='pt')
    # Inference only: no_grad skips building the autograd graph, saving
    # memory and compute (and makes a separate .detach() unnecessary).
    with torch.no_grad():
        outputs = model(**inputs)
    # Mean-pool over the sequence dimension.
    # NOTE(review): the mean also includes the [CLS]/[SEP] special tokens —
    # confirm this is intended rather than pooling only real word pieces.
    return outputs.last_hidden_state.mean(dim=1).numpy()

# Demo: embed a single word and print the resulting vector.
# Guarded so that importing this module does not trigger inference.
if __name__ == "__main__":
    word = "example"
    embedding = get_word_embedding(word)
    print(embedding)
