"""MTEB Results"""

import collections
import json
import os

import datasets
import huggingface_hub

logger = datasets.logging.get_logger(__name__)


_CITATION = r"""@article{muennighoff2022mteb,
    doi = {10.48550/ARXIV.2210.07316},
    url = {https://arxiv.org/abs/2210.07316},
    author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Lo{\"\i}c and Reimers, Nils},
    title = {MTEB: Massive Text Embedding Benchmark},
    publisher = {arXiv},
    journal = {arXiv preprint arXiv:2210.07316},
    year = {2022},
}
"""

_DESCRIPTION = """Results on MTEB"""

REPO_ID = "pt-mteb/results"
VERSION = datasets.Version("1.0.1")

# Language (and language-pair) keys that may appear in multilingual result files.
EVAL_LANGS = ['af', 'afr-por', 'am', "amh", 'amh-por', 'ang-por', 'ar', 'ar-ar', 'ara-por', 'arq-por', 'arz-por', 'ast-por', 'awa-por', 'az', 'aze-por', 'bel-por', 'ben-por', 'ber-por', 'bn', 'bos-por', 'bre-por', 'bul-por', 'cat-por', 'cbk-por', 'ceb-por', 'ces-por', 'cha-por', 'cmn-por', 'cor-por', 'csb-por', 'cy', 'cym-por', 'da', 'dan-por', 'de', 'de-fr', 'de-pl', 'deu-por', 'dsb-por', 'dtp-por', 'el', 'ell-por', 'en', 'pt-ar', 'pt-de', 'pt-pt', 'pt-tr', 'por', 'epo-por', 'es', 'es-pt', 'es-es', 'es-it', 'est-por', 'eus-por', 'fa', 'fao-por', 'fi', 'fin-por', 'fr', 'fr-pt', 'fr-pl', 'fra', 'fra-por', 'fry-por', 'gla-por', 'gle-por', 'glg-por', 'gsw-por', 'hau', 'he', 'heb-por', 'hi', 'hin-por', 'hrv-por', 'hsb-por', 'hu', 'hun-por', 'hy', 'hye-por', 'ibo', 'id', 'ido-por', 'ile-por', 'ina-por', 'ind-por', 'is', 'isl-por', 'it', 'it-pt', 'ita-por', 'ja', 'jav-por', 'jpn-por', 'jv', 'ka', 'kab-por', 'kat-por', 'kaz-por', 'khm-por', 'km', 'kn', 'ko', 'ko-ko', 'kor-por', 'kur-por', 'kzj-por', 'lat-por', 'lfn-por', 'lit-por', 'lin', 'lug', 'lv', 'lvs-por', 'mal-por', 'mar-por', 'max-por', 'mhr-por', 'mkd-por', 'ml', 'mn', 'mon-por', 'ms', 'my', 'nb', 'nds-por', 'nl', 'nl-ptde-pt', 'nld-por', 'nno-por', 'nob-por', 'nov-por', 'oci-por', 'orm', 'orv-por', 'pam-por', 'pcm', 'pes-por', 'pl', 'pl-pt', 'pms-por', 'pol-por', 'por-por', 'pt', 'ro', 'ron-por', 'ru', 'run', 'rus-por', 'sl', 'slk-por', 'slv-por', 'spa-por', 'sna', 'som', 'sq', 'sqi-por', 'srp-por', 'sv', 'sw', 'swa', 'swe-por', 'swg-por', 'swh-por', 'ta', 'tam-por', 'tat-por', 'te', 'tel-por', 'tgl-por', 'th', 'tha-por', 'tir', 'tl', 'tr', 'tuk-por', 'tur-por', 'tzl-por', 'uig-por', 'ukr-por', 'ur', 'urd-por', 'uzb-por', 'vi', 'vie-por', 'war-por', 'wuu-por', 'xho', 'xho-por', 'yid-por', 'yor', 'yue-por', 'zh', 'zh-CN', 'zh-TW', 'zh-pt', 'zsm-por', "eng_Latn-por_Latn", "spa_Latn-por_Latn", "fra_Latn-por_Latn", "ita_Latn-por_Latn", "deu_Latn-por_Latn", "jpn_Jpan-por_Latn", "kor_Hang-por_Latn", "rus_Cyrl-por_Latn", "arb_Arab-por_Latn", "zho_Hant-por_Latn", "zho_Hans-por_Latn", "pol_Latn-por_Latn", "swe_Latn-por_Latn"]

# Keys that hold aggregate statistics or metadata rather than individual metric scores.
SKIP_KEYS = ["std", "evaluation_time", "main_score", "threshold"]
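
# Some datasets report their scores on a split other than "test"; these lists
# drive the split resolution in _generate_examples below.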
TRAIN_SPLIT = ["DanishPoliticalCommentsClassification"]
VALIDATION_SPLIT = ["AFQMC", "Cmnli", "IFlyTek", "TNews", "MSMARCO", "MSMARCO-PL", "MultilingualSentiment", "Ocnli"]
DEV_SPLIT = ["CmedqaRetrieval", "CovidRetrieval", "DuRetrieval", "EcomRetrieval", "MedicalRetrieval", "MMarcoReranking", "MMarcoRetrieval", "MSMARCO", "MSMARCO-PL", "T2Reranking", "T2Retrieval", "VideoRetrieval", "FloresBitextMining"]
TESTFULL_SPLIT = ["OpusparcusPC"]


def get_paths():
    """Collect result JSON files under ./results and cache the model-to-files mapping in paths.json."""
    files = collections.defaultdict(list)
    for base in os.listdir("results"):
        results_base_dir = os.path.join("results", base)
        if not os.path.isdir(results_base_dir):
            continue
        result_dirs = []
        added_root = False  # add the model's root directory at most once
        for d in os.listdir(results_base_dir):
            current_path = os.path.join(results_base_dir, d)
            if os.path.isdir(current_path):
                result_dirs.append((os.path.join(base, d), current_path))
            elif current_path.endswith(".json") and not added_root:
                result_dirs.append((base, results_base_dir))
                added_root = True
        for model_dir, results_model_dir in result_dirs:
            for res_file in os.listdir(results_model_dir):
                if res_file.endswith(".json"):
                    files[model_dir].append(os.path.join(results_model_dir, res_file))
    with open("paths.json", "w") as f:
        json.dump(files, f)
    return files
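

# Note: get_paths() is not called at load time; it is a helper for regenerating
# paths.json from a local checkout of the results. The file listing used below
# is fetched directly from the Hub instead.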
dataset_info = huggingface_hub.dataset_info(REPO_ID)
filepaths = [r.rfilename for r in dataset_info.siblings]
paths = collections.defaultdict(list)
for filepath in filepaths:
    if "ipynb" in filepath:
        continue
    if not filepath.startswith("results/") or not filepath.endswith(".json"):
        continue
    model_name = os.path.dirname(filepath)[len("results/"):]
    paths[model_name].append(filepath)

MODELS = list(paths.keys())
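
# Each model maps to one builder config, named after the model with "/" replaced
# by "__". Loading a single model's results (hypothetical model name):
#   datasets.load_dataset("pt-mteb/results", "intfloat__multilingual-e5-base")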


class MTEBConfig(datasets.BuilderConfig):
    def __init__(self, complete_name=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.complete_name = complete_name


class MTEBResults(datasets.GeneratorBasedBuilder):
    """MTEBResults"""

    BUILDER_CONFIGS = [
        MTEBConfig(
            name=model.replace("/", "__"),
            description=f"{model} MTEB results",
            version=VERSION,
            complete_name=model,
        )
        for model in MODELS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "mteb_dataset_name": datasets.Value("string"),
                    "eval_language": datasets.Value("string"),
                    "metric": datasets.Value("string"),
                    "score": datasets.Value("float"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(paths[self.config.complete_name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files},
            )
        ]
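
    # All results are exposed under a single TEST split; the split the scores
    # were actually computed on is resolved per dataset in _generate_examples.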

    def _generate_examples(self, filepath):
        """This function returns the examples in the raw (text) form."""
        logger.info(f"Generating examples from {filepath}")

        out = []
        for path in filepath:
            with open(path, encoding="utf-8") as f:
                res_dict = json.load(f)
            ds_name = res_dict["mteb_dataset_name"]
            # Resolve the split the scores were reported on; "test" is the default.
            split = "test"
            if (ds_name in TRAIN_SPLIT) and ("train" in res_dict):
                split = "train"
            elif (ds_name in VALIDATION_SPLIT) and ("validation" in res_dict):
                split = "validation"
            elif (ds_name in DEV_SPLIT) and ("dev" in res_dict):
                split = "dev"
            elif (ds_name in TESTFULL_SPLIT) and ("test.full" in res_dict):
                split = "test.full"
            elif "test" not in res_dict:
                logger.warning(f"Skipping {ds_name} as split {split} is not present.")
                continue
            res_dict = res_dict[split]
            # Multilingual result files nest the scores one level deeper, under a language key.
            is_multilingual = any(x in res_dict for x in EVAL_LANGS)
            langs = res_dict.keys() if is_multilingual else ["pt"]
            for lang in langs:
                if lang in SKIP_KEYS:
                    continue
                test_result_lang = res_dict[lang] if is_multilingual else res_dict
                for metric, score in test_result_lang.items():
                    # Flatten nested metric dicts into "metric_submetric" names.
                    if not isinstance(score, dict):
                        score = {metric: score}
                    for sub_metric, sub_score in score.items():
                        if any(x in sub_metric for x in SKIP_KEYS):
                            continue
                        if isinstance(sub_score, (dict, list)):
                            continue
                        out.append(
                            {
                                "mteb_dataset_name": ds_name,
                                "eval_language": lang if is_multilingual else "",
                                "metric": f"{metric}_{sub_metric}" if metric != sub_metric else metric,
                                "score": sub_score * 100,
                            }
                        )
        for idx, row in enumerate(sorted(out, key=lambda x: x["mteb_dataset_name"])):
            yield idx, row
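
# A minimal sketch of a yielded record (placeholder values, not taken from any
# actual result file):
#   {"mteb_dataset_name": "<dataset>", "eval_language": "<lang or ''>",
#    "metric": "<metric or metric_submetric>", "score": <sub_score * 100>}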