import torch
from transformers import BertTokenizer, BertModel

# Path to a locally stored Chinese whole-word-masking BERT checkpoint.
local_model_path = 'D:/ollama_models/embeddings/chinese-bert-wwm-ext'
# Alternative local embedding model (swap the path above to use it):
# local_model_path = 'D:/ollama_models/embeddings/bge-large-zh-v1.5'

# Load the tokenizer from the local checkpoint directory
tokenizer = BertTokenizer.from_pretrained(local_model_path)

# Load the pretrained BERT model weights from the same directory
model = BertModel.from_pretrained(local_model_path)

def get_text_embedding(text):
    """Return a sentence embedding for *text* as a plain Python list of floats.

    The text is tokenized (truncated to the model's 512-token limit), run
    through the BERT model without gradients, and the last hidden layer's
    token vectors are mean-pooled into a single sentence vector.

    Padding positions are excluded from the mean via the attention mask.
    For a single input string there is no padding, so the result is
    identical to a plain mean; if padding is ever present, the masked
    mean is the correct pooling (averaging padding vectors in would
    silently skew the embedding).

    Args:
        text: The input string to embed. (If a list of strings is passed,
            only the first sequence's embedding is returned, matching the
            original behavior of indexing batch item 0.)

    Returns:
        list[float]: Pooled embedding vector of length ``hidden_size``.
    """
    # Encode the input; truncation keeps sequences within BERT's limit.
    inputs = tokenizer(text, return_tensors='pt', truncation=True,
                       padding=True, max_length=512)

    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        outputs = model(**inputs)

    # Shape: [batch_size, sequence_length, hidden_size]
    last_hidden = outputs.last_hidden_state

    # Mask-aware mean pooling over the first sequence in the batch.
    # mask is [sequence_length, 1] so it broadcasts over hidden_size.
    mask = inputs['attention_mask'][0].unsqueeze(-1).to(last_hidden.dtype)
    summed = (last_hidden[0] * mask).sum(dim=0)
    count = mask.sum(dim=0).clamp(min=1)  # guard against division by zero
    # .detach() is unnecessary: tensors created under no_grad carry no graph.
    sentence_embedding = (summed / count).numpy().tolist()

    return sentence_embedding
