Commit 4187a7f (vjeronymo2 committed), 1 Parent(s): 80abb16

Adding readme and deleting script comments

Files changed (2):
  1. README.md   +61 -0
  2. mrobust.py  +1 -142
README.md ADDED
@@ -0,0 +1,61 @@
# Dataset Summary

**mRobust** is a multilingual version of the [TREC 2004 Robust passage ranking dataset](https://trec.nist.gov/data/robust/04.guidelines.html).
For more information, check out our papers:
<!-- * [**mRobust: A Multilingual Version of the MS MARCO Passage Ranking Dataset**](https://arxiv.org/abs/2108.13897)
* [**A cost-benefit analysis of cross-lingual transfer methods**](https://arxiv.org/abs/2105.06813) -->

The current version is composed of 10 languages: Chinese, French, German, Indonesian, Italian, Portuguese, Russian, Spanish, Dutch, and Vietnamese.
### Supported languages

| Language name | Language code |
|---------------|---------------|
| English       | english       |
| Chinese       | chinese       |
| French        | french        |
| German        | german        |
| Indonesian    | indonesian    |
| Italian       | italian       |
| Portuguese    | portuguese    |
| Russian       | russian       |
| Spanish       | spanish       |
| Dutch         | dutch         |
| Vietnamese    | vietnamese    |
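Config names combine a split prefix (`collection` or `queries`) with a language code from this table, so every language can be fetched in a loop. A minimal sketch, assuming each code in the table is available for both splits (the loop and variable names are illustrative):

```python
from datasets import load_dataset

# Language codes from the table above; config names follow the
# "<split>-<language code>" pattern used in the examples below.
LANGUAGES = ["english", "chinese", "french", "german", "indonesian",
             "italian", "portuguese", "russian", "spanish", "dutch",
             "vietnamese"]

for lang in LANGUAGES:
    # Each config exposes a single split named after the prefix.
    queries = load_dataset("unicamp-dl/mrobust", f"queries-{lang}")["queries"]
    print(lang, queries.num_rows)
```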

# Dataset Structure

You can load the mRobust dataset by choosing a specific language. We include the translated collections of documents and queries.

#### Queries

```python
>>> from datasets import load_dataset
>>> dataset = load_dataset('unicamp-dl/mrobust', 'queries-spanish')
>>> dataset['queries'][1]
{'id': '302', 'text': '¿Está controlada la enfermedad de la poliomielitis (polio) en el mundo?'}
```

#### Collection

```python
>>> dataset = load_dataset('unicamp-dl/mrobust', 'collection-portuguese')
>>> dataset['collection'][5]
{'id': 'FT931-16660', 'text': '930105 FT 05 JAN 93 / Cenelec: Correção O endereço do Cenelec, Comitê Europeu de Normalização Eletrotécnica, estava incorreto na edição de ontem. É Rue de Stassart 35, B-1050, Bruxelas, Tel (322) 519 6871. CEN, Comitê Europeu de Normalização, está localizado na Rue de Stassart 36, B-1050, Bruxelas, Tel 519 6811.'}
```
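Both splits yield flat records with `id` and `text` fields, so a common first step for retrieval experiments is an id-to-text lookup. A minimal sketch (the dict comprehension is illustrative, not part of the dataset):

```python
>>> from datasets import load_dataset
>>> collection = load_dataset('unicamp-dl/mrobust', 'collection-portuguese')['collection']
>>> id_to_text = {doc['id']: doc['text'] for doc in collection}  # id -> document text
>>> id_to_text['FT931-16660'][:30]
'930105 FT 05 JAN 93 / Cenelec:'
```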

# Citation Information
Soon.
<!-- ```
@misc{bonifacio2021mmarco,
      title={mRobust: A Multilingual Version of MS MARCO Passage Ranking Dataset},
      author={Luiz Henrique Bonifacio and Vitor Jeronymo and Hugo Queiroz Abonizio and Israel Campiotti and Marzieh Fadaee and Roberto Lotufo and Rodrigo Nogueira},
      year={2021},
      eprint={2108.13897},
      archivePrefix={arXiv},
      primaryClass={cs.CL}
}
``` -->
mrobust.py CHANGED
@@ -36,31 +36,6 @@ _DESCRIPTION = """
 Robust04 translated datasets
 """
 
-
-# def generate_examples_triples(filepath, collection_path, queries_path):
-#     collection = {}
-#     with open(collection_path, encoding="utf-8") as f:
-#         for line in f:
-#             doc_id, doc = line.rstrip().split("\t")
-#             collection[doc_id] = doc
-
-#     queries = {}
-#     with open(queries_path, encoding="utf-8") as f:
-#         for line in f:
-#             query_id, query = line.rstrip().split("\t")
-#             queries[query_id] = query
-
-#     with open(filepath, encoding="utf-8") as f:
-#         for (idx, line) in enumerate(f):
-#             query_id, pos_id, neg_id = line.rstrip().split("\t")
-#             features = {
-#                 "query": queries[query_id],
-#                 "positive": collection[pos_id],
-#                 "negative": collection[neg_id],
-#             }
-#             yield idx, features
-
-
 def generate_examples_tuples(filepath):
     with open(filepath, encoding="utf-8") as f:
         for (idx, line) in enumerate(f):
@@ -71,48 +46,9 @@ def generate_examples_tuples(filepath):
             }
             yield idx, features
 
-
-# def generate_examples_runs(filepath, collection_path, queries_path):
-#     collection = {}
-#     with open(collection_path, encoding="utf-8") as f:
-#         for line in f:
-#             doc_id, doc = line.rstrip().split("\t")
-#             collection[doc_id] = doc
-
-#     queries = {}
-#     with open(queries_path, encoding="utf-8") as f:
-#         for line in f:
-#             query_id, query = line.rstrip().split("\t")
-#             queries[query_id] = query
-
-#     qid_to_ranked_candidate_passages = {}
-#     with open(filepath, encoding="utf-8") as f:
-#         for line in f:
-#             qid, pid, rank = line.rstrip().split("\t")
-#             if qid not in qid_to_ranked_candidate_passages:
-#                 qid_to_ranked_candidate_passages[qid] = []
-#             qid_to_ranked_candidate_passages[qid].append(pid)
-
-#     for (idx, qid) in enumerate(qid_to_ranked_candidate_passages):
-#         features = {
-#             "id": qid,
-#             "query": queries[qid],
-#             "passages": [
-#                 {
-#                     "id": pid,
-#                     "passage": collection[pid],
-#                 }
-#                 for pid in qid_to_ranked_candidate_passages[qid]
-#             ],
-#         }
-#         yield idx, features
-
-
 _BASE_URLS = {
     "collections": "https://huggingface.co/datasets/unicamp-dl/mrobust/resolve/main/data/collections/",
     "queries": "https://huggingface.co/datasets/unicamp-dl/mrobust/resolve/main/data/queries/",
-    # "runs": "https://huggingface.co/datasets/unicamp-dl/mrobust/resolve/main/data/runs/",
-    # "train": "https://huggingface.co/datasets/unicamp-dl/mrobust/resolve/main/data/triples.train.ids.small.tsv",
 }
 
 LANGUAGES = [
@@ -133,14 +69,6 @@ LANGUAGES = [
 class MRobust(datasets.GeneratorBasedBuilder):
 
     BUILDER_CONFIGS = (
-        # [
-        #     datasets.BuilderConfig(
-        #         name=language,
-        #         description=f"{language.capitalize()} triples",
-        #         version=datasets.Version("1.0.0"),
-        #     )
-        #     for language in LANGUAGES
-        # ]
         [
             datasets.BuilderConfig(
                 name=f"collection-{language}",
@@ -157,14 +85,6 @@ class MRobust(datasets.GeneratorBasedBuilder):
             )
             for language in LANGUAGES
         ]
-        # + [
-        #     datasets.BuilderConfig(
-        #         name=f"runs-{language}",
-        #         description=f"{language.capitalize()} runs",
-        #         version=datasets.Version("1.0.0"),
-        #     )
-        #     for language in LANGUAGES
-        # ]
     )
 
     DEFAULT_CONFIG_NAME = "english"
@@ -176,23 +96,6 @@ class MRobust(datasets.GeneratorBasedBuilder):
                 "id": datasets.Value("string"),
                 "text": datasets.Value("string"),
             }
-        # elif name.startswith("runs"):
-        #     features = {
-        #         "id": datasets.Value("int32"),
-        #         "query": datasets.Value("string"),
-        #         "passages": datasets.Sequence(
-        #             {
-        #                 "id": datasets.Value("int32"),
-        #                 "passage": datasets.Value("string"),
-        #             }
-        #         ),
-        #     }
-        # else:
-        #     features = {
-        #         "query": datasets.Value("string"),
-        #         "positive": datasets.Value("string"),
-        #         "negative": datasets.Value("string"),
-        #     }
 
         return datasets.DatasetInfo(
             description=f"{_DESCRIPTION}\n{self.config.description}",
@@ -213,53 +116,9 @@ class MRobust(datasets.GeneratorBasedBuilder):
             url = _BASE_URLS["queries"] + self.config.name[8:] + "_queries.tsv"
             dl_path = dl_manager.download_and_extract(url)
             return (datasets.SplitGenerator(name="queries", gen_kwargs={"filepath": dl_path}),)
-        # elif self.config.name.startswith("runs"):
-        #     urls = {
-        #         "collection": _BASE_URLS["collections"] + self.config.name[5:] + "_collection.tsv",
-        #         "queries": _BASE_URLS["queries-dev"] + self.config.name[5:] + "_queries.dev.tsv",
-        #         "run": _BASE_URLS["runs"] + "run.bm25_" + self.config.name[5:] + ".txt",
-        #     }
-
-        #     dl_path = dl_manager.download_and_extract(urls)
-        #     return (
-        #         datasets.SplitGenerator(
-        #             name="bm25",
-        #             gen_kwargs={
-        #                 "filepath": dl_path["run"],
-        #                 "args": {
-        #                     "collection": dl_path["collection"],
-        #                     "queries": dl_path["queries"],
-        #                 },
-        #             },
-        #         ),
-        #     )
-        # else:
-        #     urls = {
-        #         "collection": _BASE_URLS["collections"] + self.config.name + "_collection.tsv",
-        #         "queries": _BASE_URLS["queries-train"] + self.config.name + "_queries.train.tsv",
-        #         "train": _BASE_URLS["train"],
-        #     }
-        #     dl_path = dl_manager.download_and_extract(urls)
-
-        #     return [
-        #         datasets.SplitGenerator(
-        #             name=datasets.Split.TRAIN,
-        #             gen_kwargs={
-        #                 "filepath": dl_path["train"],
-        #                 "args": {
-        #                     "collection": dl_path["collection"],
-        #                     "queries": dl_path["queries"],
-        #                 },
-        #             },
-        #         )
-        #     ]
 
     def _generate_examples(self, filepath, args=None):
         """Yields examples."""
 
         if self.config.name.startswith("collection") or self.config.name.startswith("queries"):
-            return generate_examples_tuples(filepath)
-        # if self.config.name.startswith("runs"):
-        #     return generate_examples_runs(filepath, args["collection"], args["queries"])
-        # else:
-        #     return generate_examples_triples(filepath, args["collection"], args["queries"])
+            return generate_examples_tuples(filepath)
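The kept reader, `generate_examples_tuples`, is only partially visible in the diff context. For reference, a minimal sketch of what the surviving lines imply; the tab-split in the elided middle is an assumption, matching the deleted commented-out readers and the declared `id`/`text` features:

```python
# Sketch, not the verbatim script: the elided body presumably splits each
# TSV line into an id and a text field, matching the declared features.
def generate_examples_tuples(filepath):
    with open(filepath, encoding="utf-8") as f:
        for (idx, line) in enumerate(f):
            doc_id, text = line.rstrip("\n").split("\t")  # assumed: id<TAB>text rows
            features = {
                "id": doc_id,
                "text": text,
            }
            yield idx, features
```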