from modelscope.hub.snapshot_download import snapshot_download
from transformers import BertTokenizer, BertModel
import torch

# ModelScope model identifier and local cache directory for the snapshot.
_MODEL_ID = 'tiansz/bert-base-chinese'
_CACHE_DIR = './bert-base-chinese'


def main() -> None:
    """Download bert-base-chinese, embed one sample sentence, and print
    each token alongside the first 10 dimensions of its contextual embedding.

    Side effects: downloads the model snapshot to ``_CACHE_DIR`` (network
    access on first run) and writes progress/results to stdout.
    """
    # Download the model snapshot to a local directory
    # (ModelScope skips the download if it is already cached).
    model_dir = snapshot_download(_MODEL_ID, cache_dir=_CACHE_DIR)
    print(f"模型已下载到: {model_dir}")

    # Load tokenizer and model from the downloaded local snapshot.
    tokenizer = BertTokenizer.from_pretrained(model_dir)
    model = BertModel.from_pretrained(model_dir)

    # Evaluation mode: disables dropout so embeddings are deterministic.
    model.eval()

    # Sample input sentence.
    sentence = "你好，今天天气怎么样？"

    # Tokenize into model-ready tensors ('pt' -> PyTorch).
    inputs = tokenizer(sentence, return_tensors='pt')

    # Forward pass without building the autograd graph (inference only).
    with torch.no_grad():
        outputs = model(**inputs)

    # Last hidden layer = per-token contextual embeddings.
    last_hidden_states = outputs.last_hidden_state
    print("Embeddings shape:", last_hidden_states.shape)  # [batch_size, sequence_length, hidden_size]

    # Map the input ids back to their token strings for display.
    tokens = tokenizer.convert_ids_to_tokens(inputs['input_ids'][0])

    # Print each token and the first 10 dimensions of its embedding.
    for i, (token, embedding) in enumerate(zip(tokens, last_hidden_states[0])):
        print(f"Token {i}: {token}")
        print(f"Embedding: {embedding[:10]}...")  # truncated to 10 dims for readability


if __name__ == "__main__":
    main()