

# Candidate documents for retrieval. A list (not a set) is required here:
# set iteration order is arbitrary, so similarity scores computed later could
# not be mapped back to their documents by index, and a set would also
# silently drop duplicate documents.
documents = [
    "深度学习技术在计算机视觉领域中非常重要。",
    "使用深度学习模型可以理解文档的深层语义。",
    "密集检索器的优势通过学习文档和查询的表示来提高检索的准确率。",
]

# The query to score every document against.
query = "密集检索的优势"

from transformers import BertTokenizer, BertModel
import torch

# Local path to the pretrained Chinese MacBERT checkpoint.
# Windows paths must be raw strings: "\s" happens to stay a literal
# backslash, but sequences such as "\n" or "\t" in a non-raw string would
# be interpreted as escape characters and silently corrupt the path.
MODEL_PATH = r"D:\snmc\chinese-macbert-base"

tokenizer = BertTokenizer.from_pretrained(MODEL_PATH)
model = BertModel.from_pretrained(MODEL_PATH)

def get_embedding(text):
    """Return sentence embedding(s) for `text`, shape (batch, hidden_size).

    `text` may be a single string or a list of strings (the tokenizer is
    already called with padding=True, so batches are supported). The
    embedding is the attention-mask-weighted mean of the model's last
    hidden states: padding tokens are excluded from the average, which a
    plain mean(dim=1) would wrongly include for batched, padded input.
    For a single string (no padding) the result equals the plain mean.
    """
    inputs = tokenizer(
        text, return_tensors="pt", padding=True, truncation=True, max_length=512
    )

    # Inference only — no gradients needed.
    with torch.no_grad():
        outputs = model(**inputs)

    # (batch, seq_len, 1) mask selecting real (non-padding) tokens.
    mask = inputs["attention_mask"].unsqueeze(-1).float()
    summed = (outputs.last_hidden_state * mask).sum(dim=1)
    # clamp guards against division by zero for a degenerate all-pad row.
    counts = mask.sum(dim=1).clamp(min=1e-9)
    return summed / counts

# Embed the query: shape (1, hidden_size).
query_embedding = get_embedding(query)
print(query_embedding.shape)

# Embed every document into a (num_docs, hidden_size) matrix. torch.cat
# along dim 0 (rather than stack + squeeze) keeps the result 2-D even if
# there is only a single document; squeeze() would collapse a (1, 1, H)
# stack to 1-D and break cosine_similarity below.
doc_embeddings = torch.cat([get_embedding(doc) for doc in documents], dim=0)
print(doc_embeddings.shape)

from sklearn.metrics.pairwise import cosine_similarity

# Cosine similarity between the single query and every document; row 0
# holds one score per document, in the same order as `documents`.
# (Fixed typo: "simialrities" -> "similarities".)
similarities = cosine_similarity(query_embedding.numpy(), doc_embeddings.numpy())

print(similarities[0])