I don't get the same value with the hosted API

#1
by irensaltali - opened

I'm getting a float array when I run the code from the README. How can I get the same value as the hosted endpoint?

I solved it. The hosted endpoint returns cosine similarities between mean-pooled, L2-normalized sentence embeddings, so those steps need to be added on top of the README code. Here is the code:

from transformers import AutoTokenizer, AutoModel
import torch
import torch.nn.functional as F

# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Sentences we want sentence embeddings for
sentences = ["This is an example sentence", "Each sentence is converted"]

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')
model = AutoModel.from_pretrained('sentence-transformers/all-MiniLM-L6-v2')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

# Normalize embeddings
sentence_embeddings_normalized = F.normalize(sentence_embeddings, p=2, dim=1)

# Calculate cosine similarities between the first sentence and every sentence
# (the embeddings are already L2-normalized, so this is just their dot product;
# index 0 compares the first sentence with itself and is 1.0)
first_sentence_embedding = sentence_embeddings_normalized[0].unsqueeze(0)
cosine_similarities = F.cosine_similarity(first_sentence_embedding, sentence_embeddings_normalized)

print("Cosine similarities between the first sentence and all sentences:")
print(cosine_similarities)
