The snippet below wraps the multilingual Universal Sentence Encoder so it can be evaluated as part of the MTEB benchmark.
```python
# !pip install tensorflow tensorflow_hub tensorflow_text
import tensorflow_hub as hub
from tensorflow_text import SentencepieceTokenizer  # importing tensorflow_text registers the ops the model needs
import tensorflow as tf

# Load the multilingual Universal Sentence Encoder (large, v3) from TF Hub.
embedder = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")

class USE:
    def encode(self, sentences, batch_size=32, **kwargs):
        """Embed sentences in batches and return a list of embedding vectors."""
        embeddings = []
        for i in range(0, len(sentences), batch_size):
            batch_sentences = sentences[i:i + batch_size]
            batch_embeddings = embedder(batch_sentences)
            embeddings.extend(batch_embeddings.numpy())  # convert eager tensors to NumPy arrays
        return embeddings

model = USE()
```
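As a minimal sketch of how this wrapper could be exercised and then evaluated, assuming the `mteb` package (classic `MTEB` API) is installed; the example sentences, task selection, and output folder below are illustrative, not part of the original card:

```python
from mteb import MTEB

# Quick smoke test: two arbitrary sentences; this model returns 512-dimensional vectors.
vectors = model.encode(["MTEB is a benchmark.", "Ceci est une phrase."])
print(len(vectors), vectors[0].shape)

# Run a single MTEB task with the wrapper; scores are written to the output folder.
evaluation = MTEB(tasks=["AmazonCounterfactualClassification"])
evaluation.run(model, output_folder="results/use-multilingual-large")
```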
Evaluation results
All scores are self-reported on the test set of each task.

| MTEB task | Metric | Score |
| --- | --- | --- |
| AmazonCounterfactualClassification (en) | accuracy | 76.194 |
| AmazonCounterfactualClassification (en) | ap | 39.250 |
| AmazonCounterfactualClassification (en) | f1 | 70.175 |
| AmazonPolarityClassification | accuracy | 69.629 |
| AmazonPolarityClassification | ap | 63.973 |
| AmazonPolarityClassification | f1 | 69.486 |
| AmazonReviewsClassification (en) | accuracy | 35.534 |
| AmazonReviewsClassification (en) | f1 | 34.974 |
| ArxivClusteringP2P | v_measure | 34.718 |
| ArxivClusteringS2S | v_measure | 25.267 |