"""MTEB Results"""

import json

import datasets


logger = datasets.logging.get_logger(__name__)


_CITATION = r"""@article{muennighoff2022mteb,
  doi = {10.48550/ARXIV.2210.07316},
  url = {https://arxiv.org/abs/2210.07316},
  author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Lo{\"\i}c and Reimers, Nils},
  title = {MTEB: Massive Text Embedding Benchmark},
  publisher = {arXiv},
  journal = {arXiv preprint arXiv:2210.07316},
  year = {2022}
}
"""

_DESCRIPTION = """Results of models evaluated on the Massive Text Embedding Benchmark (MTEB)"""

URL = "https://huggingface.co/datasets/mteb/results/resolve/main/paths.json"
VERSION = datasets.Version("1.0.1")
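# Language and language-pair codes that may key per-language scores inside a
# result file; used below to detect multilingual (nested) result dicts.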
EVAL_LANGS = ['af', 'afr-eng', 'am', 'amh-eng', 'ang-eng', 'ar', 'ar-ar', 'ara-eng', 'arq-eng', 'arz-eng', 'ast-eng', 'awa-eng', 'az', 'aze-eng', 'bel-eng', 'ben-eng', 'ber-eng', 'bn', 'bos-eng', 'bre-eng', 'bul-eng', 'cat-eng', 'cbk-eng', 'ceb-eng', 'ces-eng', 'cha-eng', 'cmn-eng', 'cor-eng', 'csb-eng', 'cy', 'cym-eng', 'da', 'dan-eng', 'de', 'de-fr', 'de-pl', 'deu-eng', 'dsb-eng', 'dtp-eng', 'el', 'ell-eng', 'en', 'en-ar', 'en-de', 'en-en', 'en-tr', 'epo-eng', 'es', 'es-en', 'es-es', 'es-it', 'est-eng', 'eus-eng', 'fa', 'fao-eng', 'fi', 'fin-eng', 'fr', 'fr-en', 'fr-pl', 'fra-eng', 'fry-eng', 'gla-eng', 'gle-eng', 'glg-eng', 'gsw-eng', 'he', 'heb-eng', 'hi', 'hin-eng', 'hrv-eng', 'hsb-eng', 'hu', 'hun-eng', 'hy', 'hye-eng', 'id', 'ido-eng', 'ile-eng', 'ina-eng', 'ind-eng', 'is', 'isl-eng', 'it', 'it-en', 'ita-eng', 'ja', 'jav-eng', 'jpn-eng', 'jv', 'ka', 'kab-eng', 'kat-eng', 'kaz-eng', 'khm-eng', 'km', 'kn', 'ko', 'ko-ko', 'kor-eng', 'kur-eng', 'kzj-eng', 'lat-eng', 'lfn-eng', 'lit-eng', 'lv', 'lvs-eng', 'mal-eng', 'mar-eng', 'max-eng', 'mhr-eng', 'mkd-eng', 'ml', 'mn', 'mon-eng', 'ms', 'my', 'nb', 'nds-eng', 'nl', 'nl-ende-en', 'nld-eng', 'nno-eng', 'nob-eng', 'nov-eng', 'oci-eng', 'orv-eng', 'pam-eng', 'pes-eng', 'pl', 'pl-en', 'pms-eng', 'pol-eng', 'por-eng', 'pt', 'ro', 'ron-eng', 'ru', 'rus-eng', 'sl', 'slk-eng', 'slv-eng', 'spa-eng', 'sq', 'sqi-eng', 'srp-eng', 'sv', 'sw', 'swe-eng', 'swg-eng', 'swh-eng', 'ta', 'tam-eng', 'tat-eng', 'te', 'tel-eng', 'tgl-eng', 'th', 'tha-eng', 'tl', 'tr', 'tuk-eng', 'tur-eng', 'tzl-eng', 'uig-eng', 'ukr-eng', 'ur', 'urd-eng', 'uzb-eng', 'vi', 'vie-eng', 'war-eng', 'wuu-eng', 'xho-eng', 'yid-eng', 'yue-eng', 'zh', 'zh-CN', 'zh-TW', 'zh-en', 'zsm-eng']

SKIP_KEYS = ["std", "evaluation_time", "main_score", "threshold"]  # metadata/aggregate keys, not individual metric scores

MODELS = [
    "all-MiniLM-L12-v2",
    "all-MiniLM-L6-v2",
    "all-mpnet-base-v2",
    "allenai-specter",
    "bert-base-swedish-cased",
    "bert-base-uncased",
    "bge-base-zh",
    "bge-large-zh",
    "bge-large-zh-noinstruct",
    "bge-small-zh",
    "contriever-base-msmarco",
    "cross-en-de-roberta-sentence-transformer",
    "dfm-encoder-large-v1",
    "dfm-sentence-encoder-large-1",
    "distiluse-base-multilingual-cased-v2",
    "DanskBERT",
    "e5-base",
    "e5-large",
    "e5-small",
    "electra-small-nordic",
    "electra-small-swedish-cased-discriminator",
    "gbert-base",
    "gbert-large",
    "gelectra-base",
    "gelectra-large",
    "gottbert-base",
    "glove.6B.300d",
    "gtr-t5-base",
    "gtr-t5-large",
    "gtr-t5-xl",
    "gtr-t5-xxl",
    "komninos",
    "luotuo-bert-medium",
    "LaBSE",
    "LASER2",
    "msmarco-bert-co-condensor",
    "m3e-base",
    "m3e-large",
    "multilingual-e5-base",
    "multilingual-e5-large",
    "multilingual-e5-small",
    "nb-bert-base",
    "nb-bert-large",
    "norbert3-base",
    "norbert3-large",
    "paraphrase-multilingual-MiniLM-L12-v2",
    "paraphrase-multilingual-mpnet-base-v2",
    "sentence-bert-swedish-cased",
    "sentence-t5-base",
    "sentence-t5-large",
    "sentence-t5-xl",
    "sentence-t5-xxl",
    "sgpt-bloom-1b7-nli",
    "sgpt-bloom-7b1-msmarco",
    "sup-simcse-bert-base-uncased",
    "text2vec-base-chinese",
    "text2vec-large-chinese",
    "text-embedding-ada-002",
    "text-similarity-ada-001",
    "text-similarity-babbage-001",
    "text-similarity-curie-001",
    "text-similarity-davinci-001",
    "text-search-ada-doc-001",
    "text-search-ada-001",
    "text-search-babbage-001",
    "text-search-curie-001",
    "text-search-davinci-001",
    "unsup-simcse-bert-base-uncased",
    "use-cmlm-multilingual",
    "xlm-roberta-base",
    "xlm-roberta-large",
]


# Needs to be rerun whenever new result files are added.
def get_paths():
    """Scan the local results/ directory and rebuild the paths.json index."""
    import collections
    import os

    files = collections.defaultdict(list)
    for model_dir in os.listdir("results"):
        results_model_dir = os.path.join("results", model_dir)
        if not os.path.isdir(results_model_dir):
            print(f"Skipping {results_model_dir}")
            continue
        for res_file in os.listdir(results_model_dir):
            if res_file.endswith(".json"):
                results_model_file = os.path.join(results_model_dir, res_file)
                files[model_dir].append(results_model_file)
    # json is imported at module level
    with open("paths.json", "w") as f:
        json.dump(files, f)
    return files
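

# Example: regenerate the index after adding new result files (run from the
# repository root, where the results/ directory lives):
#
#     >>> get_paths()  # rewrites paths.json in the current directory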


class MTEBResults(datasets.GeneratorBasedBuilder):
    """MTEBResults"""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=model,
            description=f"{model} MTEB results",
            version=VERSION,
        )
        for model in MODELS
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "mteb_dataset_name": datasets.Value("string"),
                    "eval_language": datasets.Value("string"),
                    "metric": datasets.Value("string"),
                    "score": datasets.Value("float"),
                }
            ),
            supervised_keys=None,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Fetch the index that maps each model name to its result files, then
        # download the files for this config's model.
        path_file = dl_manager.download_and_extract(URL)
        with open(path_file) as f:
            files = json.load(f)

        downloaded_files = dl_manager.download_and_extract(files[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": downloaded_files},
            )
        ]

    def _generate_examples(self, filepath):
        """Yield flattened (dataset, language, metric, score) examples from the raw result files."""
        logger.info(f"Generating examples from {filepath}")

        out = []

        for path in filepath:
            with open(path, encoding="utf-8") as f:
                res_dict = json.load(f)
            ds_name = res_dict["mteb_dataset_name"]
            # Most tasks are scored on the test split; the tasks below may only
            # report dev/validation scores, and
            # DanishPoliticalCommentsClassification only has a train split.
            split = "test"
            if ds_name in [
                "AFQMC", "CmedqaRetrieval", "Cmnli", "CovidRetrieval", "DuRetrieval",
                "EcomRetrieval", "IFlyTek", "JDReview", "MedicalRetrieval",
                "MultilingualSentiment", "MMarcoReranking", "MMarcoRetrieval",
                "MSMARCO", "Ocnli", "OnlineShopping", "T2Reranking", "T2Retrieval",
                "TNews", "VideoRetrieval", "Waimai",
            ]:
                if "test" in res_dict:
                    split = "test"
                elif "dev" in res_dict:
                    split = "dev"
                elif "validation" in res_dict:
                    split = "validation"
            elif ds_name in ["DanishPoliticalCommentsClassification"]:
                split = "train"
            if split not in res_dict:
                print(f"Skipping {ds_name} as split {split} is not present.")
                continue
            res_dict = res_dict[split]
            # Multilingual results nest per-language score dicts one level deeper.
            is_multilingual = any(x in res_dict for x in EVAL_LANGS)
            langs = res_dict.keys() if is_multilingual else ["en"]
            for lang in langs:
                if lang in SKIP_KEYS:
                    continue
                test_result_lang = res_dict[lang] if is_multilingual else res_dict
                for metric, score in test_result_lang.items():
                    # Wrap scalar scores so scalars and nested metric dicts
                    # flatten through the same loop.
                    if not isinstance(score, dict):
                        score = {metric: score}
                    for sub_metric, sub_score in score.items():
                        if any(x in sub_metric for x in SKIP_KEYS):
                            continue
                        out.append({
                            "mteb_dataset_name": ds_name,
                            "eval_language": lang if is_multilingual else "",
                            "metric": f"{metric}_{sub_metric}" if metric != sub_metric else metric,
                            "score": sub_score * 100,  # raw scores are fractions; report percentages
                        })
        # Sort for a deterministic example order across runs.
        for idx, row in enumerate(sorted(out, key=lambda x: x["mteb_dataset_name"])):
            yield idx, row
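

# Example usage (assuming this script backs the mteb/results dataset on the
# Hugging Face Hub; any entry from MODELS can be used as the config name):
#
#     import datasets
#
#     res = datasets.load_dataset("mteb/results", "all-MiniLM-L6-v2", split="test")
#     print(res[0])
#     # {"mteb_dataset_name": "...", "eval_language": "...", "metric": "...", "score": ...}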