"""MTEB Results"""

import collections
import json
import os

import datasets


logger = datasets.logging.get_logger(__name__)

_CITATION = """@article{muennighoff2022mteb,
    doi = {10.48550/ARXIV.2210.07316},
    url = {https://arxiv.org/abs/2210.07316},
    author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Lo{\"\i}c and Reimers, Nils},
    title = {MTEB: Massive Text Embedding Benchmark},
    publisher = {arXiv},
    journal = {arXiv preprint arXiv:2210.07316},
    year = {2022}
}
"""

_DESCRIPTION = """Results on MTEB Portuguese"""

URL = "https://huggingface.co/datasets/projetomemoreba/results/resolve/main/paths.json"
VERSION = datasets.Version("1.0.1")
EVAL_LANGS = ["pt"]

# Keys that hold aggregates or metadata rather than individual metric scores.
SKIP_KEYS = ["std", "evaluation_time", "main_score", "threshold"]

# Datasets whose scores are reported on a split other than "test";
# checked in order (train, validation, dev) in _generate_examples.
TRAIN_SPLIT = ["DanishPoliticalCommentsClassification"]
VALIDATION_SPLIT = ["AFQMC", "Cmnli", "IFlyTek", "TNews", "MSMARCO", "MSMARCO-PL", "MultilingualSentiment", "Ocnli"]
DEV_SPLIT = ["CmedqaRetrieval", "CovidRetrieval", "DuRetrieval", "EcomRetrieval", "MedicalRetrieval", "MMarcoReranking", "MMarcoRetrieval", "MSMARCO", "MSMARCO-PL", "T2Reranking", "T2Retrieval", "VideoRetrieval"]
# Models with results in this repository; each becomes one builder config.
MODELS = [
    "instructor-base",
    "xlm-roberta-large",
    "gtr-t5-large",
    "sentence-t5-xxl",
    "GIST-Embedding-v0",
    "e5-base",
    "mxbai-embed-2d-large-v1",
    "SGPT-5.8B-weightedmean-nli-bitfit",
    "jina-embeddings-v2-base-de",
    "gte-base",
    "jina-embedding-b-en-v1",
    "LaBSE",
    "sgpt-bloom-7b1-msmarco",
    "bi-cse",
    "distilbert-base-uncased",
    "bert-base-10lang-cased",
    "sentence-t5-large",
    "jina-embeddings-v2-small-en",
    "e5-mistral-7b-instruct",
    "bge-base-en-v1.5",
    "ember-v1",
    "e5-large-v2",
    "lodestone-base-4096-v1",
    "all-mpnet-base-v2",
    "sentence-t5-xl",
    "distilbert-base-en-fr-cased",
    "gte-tiny",
    "text2vec-base-multilingual",
    "GIST-all-MiniLM-L6-v2",
    "jina-embeddings-v2-base-es",
    "bert-base-multilingual-uncased",
    "distiluse-base-multilingual-cased-v2",
    "sup-simcse-bert-base-uncased",
    "e5-small-v2",
    "GritLM-7B",
    "sentence-t5-base",
    "SFR-Embedding-Mistral",
    "mxbai-embed-large-v1",
    "stella-base-en-v2",
    "udever-bloom-3b",
    "bert-base-multilingual-cased",
    "all-MiniLM-L12-v2",
    "sf_model_e5",
    "bert-base-portuguese-cased",
    "bge-small-en-v1.5",
    "SGPT-125M-weightedmean-msmarco-specb-bitfit",
    "udever-bloom-560m",
    "gtr-t5-base",
    "fin-mpnet-base",
    "SGPT-2.7B-weightedmean-msmarco-specb-bitfit",
    "xlm-roberta-base",
    "GIST-small-Embedding-v0",
    "gte-large",
    "ALL_862873",
    "e5-large",
    "distilbert-base-en-fr-es-pt-it-cased",
    "dfm-sentence-encoder-large-v1",
    "bge-micro",
    "instructor-large",
    "average_word_embeddings_glove.6B.300d",
    "multilingual-e5-large-instruct",
    "msmarco-bert-co-condensor",
    "multilingual-e5-small",
    "UAE-Large-V1",
    "udever-bloom-1b1",
    "distilbert-base-fr-cased",
    "instructor-xl",
    "bert-base-uncased",
    "all-MiniLM-L6-v2",
    "e5-base-v2",
    "jina-embedding-l-en-v1",
    "gtr-t5-xl",
    "gte-small",
    "bge-small-4096",
    "average_word_embeddings_komninos",
    "unsup-simcse-bert-base-uncased",
    "bert-base-15lang-cased",
    "paraphrase-multilingual-MiniLM-L12-v2",
    "distilbert-base-25lang-cased",
    "contriever-base-msmarco",
    "multilingual-e5-large",
    "luotuo-bert-medium",
    "GIST-large-Embedding-v0",
    "bge-large-en-v1.5",
    "cai-lunaris-text-embeddings",
    "gtr-t5-xxl",
    "multilingual-e5-base",
    "paraphrase-multilingual-mpnet-base-v2",
    "SGPT-1.3B-weightedmean-msmarco-specb-bitfit",
    "e5-dansk-test-0.1",
    "allenai-specter",
]
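
# Note: get_paths() below assumes a local checkout laid out as
# results/<model>/<TaskName>.json, with <model> matching a name in MODELS.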


def get_paths():
    """Collect every result JSON under results/<model>/ and write the mapping to paths.json."""
    files = collections.defaultdict(list)
    for model_dir in os.listdir("results"):
        results_model_dir = os.path.join("results", model_dir)
        if not os.path.isdir(results_model_dir):
            print(f"Skipping {results_model_dir}")
            continue
        for res_file in os.listdir(results_model_dir):
            if res_file.endswith(".json"):
                results_model_file = os.path.join(results_model_dir, res_file)
                files[model_dir].append(results_model_file)

    with open("paths.json", "w") as f:
        json.dump(files, f)
    return files
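
# get_paths() is a maintenance helper: presumably run from the repository root
# to regenerate paths.json before uploading. The loader itself never calls it;
# it only downloads the hosted copy via URL.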


class MTEBResults(datasets.GeneratorBasedBuilder):
    """MTEBResults"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=model,
            description=f"{model} MTEB results",
            version=VERSION,
        )
        for model in MODELS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "mteb_dataset_name": datasets.Value("string"),
                    "eval_language": datasets.Value("string"),
                    "metric": datasets.Value("string"),
                    "score": datasets.Value("float"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # paths.json maps each model name to the list of its result files.
        path_file = dl_manager.download_and_extract(URL)
        with open(path_file) as f:
            files = json.load(f)

        downloaded_files = dl_manager.download_and_extract(files[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield flattened (dataset, language, metric, score) rows from the result files."""
        logger.info(f"Generating examples from {filepath}")

        out = []

        for path in filepath:
            with open(path, encoding="utf-8") as f:
                res_dict = json.load(f)
            ds_name = res_dict["mteb_dataset_name"]
            # Default to the test split; fall back to the split a dataset is
            # known to be evaluated on.
            split = "test"
            if (ds_name in TRAIN_SPLIT) and ("train" in res_dict):
                split = "train"
            elif (ds_name in VALIDATION_SPLIT) and ("validation" in res_dict):
                split = "validation"
            elif (ds_name in DEV_SPLIT) and ("dev" in res_dict):
                split = "dev"
            elif "test" not in res_dict:
                print(f"Skipping {ds_name} as split {split} not present.")
                continue
            res_dict = res_dict.get(split)
            # Multilingual results are keyed by language code; monolingual ones
            # store the metrics at the top level.
            is_multilingual = any(x in res_dict for x in EVAL_LANGS)
            langs = res_dict.keys() if is_multilingual else ["en"]
            for lang in langs:
                if lang in SKIP_KEYS:
                    continue
                test_result_lang = res_dict.get(lang) if is_multilingual else res_dict
                for metric, score in test_result_lang.items():
                    # Wrap scalar scores in a dict so nested and flat metrics
                    # share one code path below.
                    if not isinstance(score, dict):
                        score = {metric: score}
                    for sub_metric, sub_score in score.items():
                        if any(x in sub_metric for x in SKIP_KEYS):
                            continue
                        out.append(
                            {
                                "mteb_dataset_name": ds_name,
                                "eval_language": lang if is_multilingual else "",
                                "metric": f"{metric}_{sub_metric}" if metric != sub_metric else metric,
                                # Scores are reported as percentages.
                                "score": sub_score * 100,
                            }
                        )

        for idx, row in enumerate(sorted(out, key=lambda x: x["mteb_dataset_name"])):
            yield idx, row
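

# Example usage (a sketch, not part of the loader; assumes this script is
# hosted in the `projetomemoreba/results` dataset repo implied by URL):
#
#   from datasets import load_dataset
#
#   # Each name in MODELS is a config; all results load under the "test" split.
#   ds = load_dataset(
#       "projetomemoreba/results",
#       "multilingual-e5-large",
#       split="test",
#       trust_remote_code=True,  # recent datasets versions require this for script-based datasets
#   )
#   print(ds[0])
#   # -> {"mteb_dataset_name": "...", "eval_language": "pt", "metric": "...", "score": ...}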