import os

from transformers import AutoTokenizer, AutoModel

# Local model directory.
# NOTE(review): os.path.join('D:', os.path.sep, ...) relies on ntpath keeping the
# drive letter (yields 'D:\ModelSpace\Qwen2.5\Qwen2.5-1.5B-Instruct' on Windows);
# on POSIX the 'D:' component would be dropped — this script assumes Windows.
model_dir = os.path.join('D:', os.path.sep, 'ModelSpace', 'Qwen2.5', 'Qwen2.5-1.5B-Instruct')

# Initialize the tokenizer from local files only (no Hub download attempted).
tokenizer = AutoTokenizer.from_pretrained(
    model_dir,
    local_files_only=True,
)

# Load the base model from local files; torch_dtype='auto' keeps the
# checkpoint's dtype (bfloat16, per the sample output below) and
# device_map='auto' lets accelerate choose device placement.
model = AutoModel.from_pretrained(
    model_dir,
    torch_dtype='auto',
    device_map='auto',
    local_files_only=True,
)

# Inspect the input-embedding matrix and its size.
embeddings = model.get_input_embeddings()
print(f'嵌入矩阵大小: {embeddings.weight.size()}')
# Output: 嵌入矩阵大小: torch.Size([151936, 1536])

# To dump the full embedding matrix, uncomment:
# print(embeddings.weight)

# Tokenize a sample string.
word = 'Hi, 你好~'
tokens = tokenizer.tokenize(word)
print(f'{word} 分词: {tokens}')
# Output: Hi, 你好~ 分词: ['Hi', ',', 'Ġ', 'ä½łå¥½', '~']

# Map the tokens to their vocabulary ids (one id per token, hence a list).
token_ids = tokenizer.convert_tokens_to_ids(tokens)
# Fixed typo in the printed label: 'Toke ID' -> 'Token ID'.
print(f'{word} Token ID: {token_ids}')
# Output: Hi, 你好~ Token ID: [13048, 11, 220, 108386, 93]

# Look up the embedding row for each token id: result is (num_tokens, hidden_dim).
word_embedding = embeddings.weight[token_ids]
print(f'{word} 的嵌入形状：{word_embedding.shape}')
# Output: Hi, 你好~ 的嵌入形状：torch.Size([5, 1536])

print(f'{word} 的嵌入内容: {word_embedding}')
# Output:
# Hi, 你好~ 的嵌入内容: tensor([[ 0.0261,  0.0048, -0.0043,  ...,  0.0193, -0.0493, -0.0020],
#         [-0.0303, -0.0159, -0.0107,  ..., -0.0198, -0.0020, -0.0129],
#         [-0.0236, -0.0254,  0.0325,  ..., -0.0317, -0.0082,  0.0137],
#         [ 0.0270,  0.0042,  0.0014,  ...,  0.0425, -0.0195,  0.0011],
#         [-0.0205, -0.0408, -0.0013,  ...,  0.0272, -0.0060,  0.0032]],
#        dtype=torch.bfloat16, grad_fn=<IndexBackward0>)
