The following code snippet is part of the MTEB (Massive Text Embedding Benchmark) evaluation for this model.
# !pip install tensorflow_text
import tensorflow_hub as hub
from tensorflow_text import SentencepieceTokenizer
import tensorflow as tf
# Load the multilingual Universal Sentence Encoder (v3, "large") from TF Hub.
# NOTE(review): this downloads/caches the model at import time — requires
# network access on first run.
embedder=hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")
class USE():
    """MTEB-compatible wrapper around the module-level Universal
    Sentence Encoder (`embedder`) loaded above.

    Exposes the `encode` method expected by the MTEB evaluation
    harness.
    """

    def encode(self, sentences, batch_size=32, **kwargs):
        """Embed `sentences` in batches and return one embedding per sentence.

        Args:
            sentences: sequence of input strings to embed.
            batch_size: number of sentences passed to the encoder per call.
            **kwargs: ignored; accepted for MTEB interface compatibility.

        Returns:
            A list with one embedding row per input sentence, in input order.
        """
        embeddings = []
        # Process in fixed-size batches so large inputs do not have to go
        # through the model in a single call.
        for start in range(0, len(sentences), batch_size):
            batch_sentences = sentences[start:start + batch_size]
            # embedder returns a 2-D tensor (batch, dim); extend() flattens
            # it into one row per sentence.
            batch_embeddings = embedder(batch_sentences)
            embeddings.extend(batch_embeddings)
        return embeddings
# Module-level model instance consumed by the MTEB evaluation harness.
model = USE()
- Downloads last month: 0

Unable to determine this model's library. Check the docs.
Evaluation results
- accuracy on MTEB AmazonCounterfactualClassification (en), test set (self-reported): 76.194
- ap on MTEB AmazonCounterfactualClassification (en), test set (self-reported): 39.250
- f1 on MTEB AmazonCounterfactualClassification (en), test set (self-reported): 70.175
- accuracy on MTEB AmazonPolarityClassification, test set (self-reported): 69.629
- ap on MTEB AmazonPolarityClassification, test set (self-reported): 63.973
- f1 on MTEB AmazonPolarityClassification, test set (self-reported): 69.486
- accuracy on MTEB AmazonReviewsClassification (en), test set (self-reported): 35.534
- f1 on MTEB AmazonReviewsClassification (en), test set (self-reported): 34.974
- v_measure on MTEB ArxivClusteringP2P, test set (self-reported): 34.718
- v_measure on MTEB ArxivClusteringS2S, test set (self-reported): 25.267
- cos_sim_pearson on MTEB BIOSSES, test set (self-reported): 69.650
- cos_sim_spearman on MTEB BIOSSES, test set (self-reported): 69.356
- euclidean_pearson on MTEB BIOSSES, test set (self-reported): 68.741
- euclidean_spearman on MTEB BIOSSES, test set (self-reported): 69.356
- manhattan_pearson on MTEB BIOSSES, test set (self-reported): 68.970
- manhattan_spearman on MTEB BIOSSES, test set (self-reported): 69.206
- accuracy on MTEB Banking77Classification, test set (self-reported): 78.130
- f1 on MTEB Banking77Classification, test set (self-reported): 77.402
- v_measure on MTEB BiorxivClusteringP2P, test set (self-reported): 28.392
- v_measure on MTEB BiorxivClusteringS2S, test set (self-reported): 20.515
- accuracy on MTEB EmotionClassification, test set (self-reported): 45.480
- f1 on MTEB EmotionClassification, test set (self-reported): 41.263