Commit 94ff39a (1 parent: 0314870), committed by hugo

Add loading script

Files changed (1):
  1. mmarco.py +270 -0
mmarco.py ADDED
@@ -0,0 +1,270 @@
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""mMARCO dataset."""

import datasets


_CITATION = """
@misc{bonifacio2021mmarco,
    title={mMARCO: A Multilingual Version of the MS MARCO Passage Ranking Dataset},
    author={Luiz Henrique Bonifacio and Israel Campiotti and Vitor Jeronymo and Hugo Queiroz Abonizio and Roberto Lotufo and Rodrigo Nogueira},
    year={2021},
    eprint={2108.13897},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
"""

_URL = "https://github.com/unicamp-dl/mMARCO"

_DESCRIPTION = """
mMARCO translated datasets
"""


def generate_examples_triples(filepath, collection_path, queries_path):
    # Load the passage collection: one "doc_id<TAB>passage" pair per line.
    collection = {}
    with open(collection_path, encoding="utf-8") as f:
        for line in f:
            doc_id, doc = line.rstrip().split("\t")
            collection[doc_id] = doc

    # Load the queries: one "query_id<TAB>query" pair per line.
    queries = {}
    with open(queries_path, encoding="utf-8") as f:
        for line in f:
            query_id, query = line.rstrip().split("\t")
            queries[query_id] = query

    # Each triples line holds "query_id<TAB>positive_id<TAB>negative_id";
    # resolve the ids against the two lookup tables built above.
    with open(filepath, encoding="utf-8") as f:
        for (idx, line) in enumerate(f):
            query_id, pos_id, neg_id = line.rstrip().split("\t")
            features = {
                "query": queries[query_id],
                "positive": collection[pos_id],
                "negative": collection[neg_id],
            }
            yield idx, features
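
# Illustrative example (hypothetical ids, not in the original file): given a
# triples line "q1<TAB>p7<TAB>p9", this generator yields
#   (0, {"query": queries["q1"], "positive": collection["p7"],
#        "negative": collection["p9"]})
# and raises KeyError if an id is missing from either lookup table.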


def generate_examples_tuples(filepath):
    with open(filepath, encoding="utf-8") as f:
        for (idx, line) in enumerate(f):
            # The file's own id column overwrites the enumerate index, so the
            # id read from the TSV is yielded as both the key and the feature.
            idx, text = line.rstrip().split("\t")
            features = {
                "id": idx,
                "text": text,
            }
            yield idx, features


def generate_examples_runs(filepath, collection_path, queries_path):
    collection = {}
    with open(collection_path, encoding="utf-8") as f:
        for line in f:
            doc_id, doc = line.rstrip().split("\t")
            collection[doc_id] = doc

    queries = {}
    with open(queries_path, encoding="utf-8") as f:
        for line in f:
            query_id, query = line.rstrip().split("\t")
            queries[query_id] = query

    # Group the ranked candidate passages by query id. The rank value itself
    # is discarded; candidates keep the order in which they appear in the file.
    qid_to_ranked_candidate_passages = {}
    with open(filepath, encoding="utf-8") as f:
        for line in f:
            qid, pid, rank = line.rstrip().split("\t")
            if qid not in qid_to_ranked_candidate_passages:
                qid_to_ranked_candidate_passages[qid] = []
            qid_to_ranked_candidate_passages[qid].append(pid)

    for (idx, qid) in enumerate(qid_to_ranked_candidate_passages):
        features = {
            "id": qid,
            "query": queries[qid],
            "passages": [
                {
                    "id": pid,
                    "passage": collection[pid],
                }
                for pid in qid_to_ranked_candidate_passages[qid]
            ],
        }
        yield idx, features
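
# Illustrative run excerpt (hypothetical ids, not in the original file), in
# the "qid<TAB>pid<TAB>rank" layout this parser expects:
#   q1  p7  1
#   q1  p3  2
# The two lines above become one example whose passages list holds p7 then p3.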


_BASE_URLS = {
    "collections": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/v2/collections/",
    "queries-train": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/v2/queries/train/",
    "queries-dev": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/v2/queries/dev/",
    "runs": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/v2/runs/",
    "train": "https://huggingface.co/datasets/unicamp-dl/mmarco/resolve/main/data/triples.train.ids.small.tsv",
}

LANGUAGES = [
    "chinese",
    "english",
    "french",
    "german",
    "indonesian",
    "italian",
    "portuguese",
    "russian",
    "spanish",
]
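
# Illustrative example (not in the original file): for the
# "collection-portuguese" config, _split_generators below strips the prefix
# and composes _BASE_URLS["collections"] + "portuguese" + "_collection.tsv",
# i.e. .../data/v2/collections/portuguese_collection.tsv.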


class MMarco(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = (
        [
            datasets.BuilderConfig(
                name=language,
                description=f"{language.capitalize()} version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"collection-{language}",
                description=f"{language.capitalize()} collection version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"queries-{language}",
                description=f"{language.capitalize()} queries version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
        + [
            datasets.BuilderConfig(
                name=f"runs-{language}",
                description=f"{language.capitalize()} runs version v2",
                version=datasets.Version("2.0.0"),
            )
            for language in LANGUAGES
        ]
    )

    DEFAULT_CONFIG_NAME = "english"
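
    # Four config families result, one per language in LANGUAGES:
    # "<lang>" (training triples), "collection-<lang>", "queries-<lang>",
    # and "runs-<lang>"; e.g. "portuguese", "collection-portuguese",
    # "queries-portuguese", "runs-portuguese".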

    def _info(self):
        name = self.config.name
        if name.startswith("collection") or name.startswith("queries"):
            features = {
                "id": datasets.Value("int32"),
                "text": datasets.Value("string"),
            }
        elif name.startswith("runs"):
            features = {
                "id": datasets.Value("int32"),
                "query": datasets.Value("string"),
                "passages": datasets.Sequence(
                    {
                        "id": datasets.Value("int32"),
                        "passage": datasets.Value("string"),
                    }
                ),
            }
        else:
            features = {
                "query": datasets.Value("string"),
                "positive": datasets.Value("string"),
                "negative": datasets.Value("string"),
            }

        return datasets.DatasetInfo(
            description=f"{_DESCRIPTION}\n{self.config.description}",
            features=datasets.Features(features),
            supervised_keys=None,
            homepage=_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""

        if self.config.name.startswith("collection"):
            # Strip the "collection-" prefix (11 chars) to recover the language.
            url = _BASE_URLS["collections"] + self.config.name[11:] + "_collection.tsv"
            dl_path = dl_manager.download_and_extract(url)
            return (datasets.SplitGenerator(name="collection", gen_kwargs={"filepath": dl_path}),)
        elif self.config.name.startswith("queries"):
            # Strip the "queries-" prefix (8 chars) to recover the language.
            urls = {
                "train": _BASE_URLS["queries-train"] + self.config.name[8:] + "_queries.train.tsv",
                "dev": _BASE_URLS["queries-dev"] + self.config.name[8:] + "_queries.dev.tsv",
            }
            dl_path = dl_manager.download_and_extract(urls)
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
                datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["dev"]}),
            ]
        elif self.config.name.startswith("runs"):
            # Strip the "runs-" prefix (5 chars) to recover the language.
            urls = {
                "collection": _BASE_URLS["collections"] + self.config.name[5:] + "_collection.tsv",
                "queries": _BASE_URLS["queries-dev"] + self.config.name[5:] + "_queries.dev.tsv",
                "run": _BASE_URLS["runs"] + "run.bm25_" + self.config.name[5:] + ".txt",
            }

            dl_path = dl_manager.download_and_extract(urls)
            return (
                datasets.SplitGenerator(
                    name="bm25",
                    gen_kwargs={
                        "filepath": dl_path["run"],
                        "args": {
                            "collection": dl_path["collection"],
                            "queries": dl_path["queries"],
                        },
                    },
                ),
            )
        else:
            # Plain language configs: the translated collection and queries plus
            # the language-independent triples file of (query, positive, negative) ids.
            urls = {
                "collection": _BASE_URLS["collections"] + self.config.name + "_collection.tsv",
                "queries": _BASE_URLS["queries-train"] + self.config.name + "_queries.train.tsv",
                "train": _BASE_URLS["train"],
            }
            dl_path = dl_manager.download_and_extract(urls)

            return [
                datasets.SplitGenerator(
                    name=datasets.Split.TRAIN,
                    gen_kwargs={
                        "filepath": dl_path["train"],
                        "args": {
                            "collection": dl_path["collection"],
                            "queries": dl_path["queries"],
                        },
                    },
                )
            ]

    def _generate_examples(self, filepath, args=None):
        """Yields examples."""

        if self.config.name.startswith("collection") or self.config.name.startswith("queries"):
            return generate_examples_tuples(filepath)
        elif self.config.name.startswith("runs"):
            return generate_examples_runs(filepath, args["collection"], args["queries"])
        else:
            return generate_examples_triples(filepath, args["collection"], args["queries"])
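
Usage note (not part of the commit): a minimal sketch of loading the new
configs, assuming the script is served from the unicamp-dl/mmarco Hub
repository that the _BASE_URLS above point at; config and split names follow
the script itself.

from datasets import load_dataset

# Training triples for one language ("english" is the default config).
triples = load_dataset("unicamp-dl/mmarco", "portuguese", split="train")

# A translated collection; this config exposes a single "collection" split.
collection = load_dataset("unicamp-dl/mmarco", "collection-french", split="collection")

# Translated queries come with "train" and "validation" splits.
queries = load_dataset("unicamp-dl/mmarco", "queries-german", split="validation")

# BM25 runs for re-ranking; this config exposes a single "bm25" split.
runs = load_dataset("unicamp-dl/mmarco", "runs-italian", split="bm25")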