from transformers import AutoTokenizer, AutoModel
import torch

# Download the BGE (Chinese) embedding model weights from ModelScope,
# caching them under the local directory so later runs skip the download.
from modelscope import snapshot_download
model_dir = snapshot_download('BAAI/bge-large-zh-v1.5',cache_dir="D:/evns/models")

# Load tokenizer and encoder from the downloaded snapshot directory.
model_name = model_dir
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name)

# Inference only: switch off dropout / other train-mode behavior.
model.eval()

# Candidate documents (Chinese) to retrieve from, and the retrieval query.
documents = ["深度学习技术在计算机视觉领域中非常重要。",
             "使用深度学习模型可以理解文档的深层语义。",
             "密集检索器的优势通过学习文档和查询的表示来提高检索的准确率。"
             ]
query = "密集检索的优势"

def get_embedding(text):
    """Encode *text* (a string, or a list of strings) into L2-normalized
    sentence embeddings.

    Returns a tensor of shape (batch, hidden_dim); rows are unit vectors,
    so a dot product between them is a cosine similarity.
    """
    encoded = tokenizer(text, padding=True, truncation=True,
                        return_tensors="pt", max_length=512)
    with torch.no_grad():
        model_out = model(**encoded)
    # [CLS] pooling: take the hidden state of the first token of each sequence.
    cls_vectors = model_out.last_hidden_state[:, 0]
    # Unit-normalize each row (p=2) so similarity reduces to a dot product.
    return torch.nn.functional.normalize(cls_vectors, p=2, dim=1)

# Embed the query: shape (1, hidden_dim).
query_embedding = get_embedding(query)
print(query_embedding.shape)

# Embed every document and concatenate along the batch axis -> (n_docs, hidden_dim).
# Use torch.cat instead of torch.stack(...).squeeze(): squeeze() drops ALL
# size-1 dimensions, so with exactly one document it would also remove the
# batch dim, leaving a 1-D tensor and breaking the matmul below.
doc_embeddings = torch.cat([get_embedding(doc) for doc in documents], dim=0)
print(doc_embeddings.shape)

# Embeddings are unit-normalized, so this matmul is cosine similarity:
# result shape (1, n_docs), higher = more relevant.
scores = query_embedding @ doc_embeddings.T
print(scores)



