Xueguang Ma committed on
Commit 558cf93
1 Parent(s): 9c44247
Files changed (1)
  1. wikipedia-wq-corpus.py +83 -0
wikipedia-wq-corpus.py ADDED
@@ -0,0 +1,83 @@
+# coding=utf-8
+# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# Lint as: python3
+"""Wikipedia WQ dataset."""
+import json
+import datasets
+
+_CITATION = """
+@inproceedings{karpukhin-etal-2020-dense,
+    title = "Dense Passage Retrieval for Open-Domain Question Answering",
+    author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov, Sergey and Chen, Danqi and Yih, Wen-tau",
+    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
+    month = nov,
+    year = "2020",
+    address = "Online",
+    publisher = "Association for Computational Linguistics",
+    url = "https://www.aclweb.org/anthology/2020.emnlp-main.550",
+    doi = "10.18653/v1/2020.emnlp-main.550",
+    pages = "6769--6781",
+}
+"""
+_DESCRIPTION = "dataset load script for Wikipedia WQ Corpus"
+_DATASET_URLS = {
+    'train': "https://huggingface.co/datasets/Tevatron/wikipedia-nq-corpus/resolve/main/corpus.jsonl.gz",
+}
+
+
+class WikipediaWqCorpus(datasets.GeneratorBasedBuilder):
+    VERSION = datasets.Version("0.0.1")
+    BUILDER_CONFIGS = [
+        datasets.BuilderConfig(version=VERSION,
+                               description="Wikipedia WQ corpus"),
+    ]
+
+    def _info(self):
+        features = datasets.Features(
+            {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
+             'title': datasets.Value('string')},
+        )
+        return datasets.DatasetInfo(
+            # This is the description that will appear on the datasets page.
+            description=_DESCRIPTION,
+            # This defines the different columns of the dataset and their types
+            features=features,  # Here we define them above because they are different between the two configurations
+            supervised_keys=None,
+            # Homepage of the dataset for documentation
+            homepage="",
+            # License for the dataset if available
+            license="",
+            # Citation for the dataset
+            citation=_CITATION,
+        )
+
+    def _split_generators(self, dl_manager):
+        downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
+        splits = [
+            datasets.SplitGenerator(
+                name="train",
+                gen_kwargs={
+                    "filepath": downloaded_files["train"],
+                },
+            ),
+        ]
+        return splits
+
+    def _generate_examples(self, filepath):
+        """Yields examples."""
+        with open(filepath, encoding="utf-8") as f:
+            for line in f:
+                data = json.loads(line)
+                yield data['docid'], data
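
For reference, a minimal usage sketch of the loading script above with the `datasets` library. The repo id "Tevatron/wikipedia-wq-corpus" is an assumption based on the script name, and recent versions of `datasets` additionally require trust_remote_code=True to execute a custom loading script like this one.

from datasets import load_dataset

# Download and build the corpus through the script above; it defines a single "train" split.
# Repo id is assumed from the script name.
corpus = load_dataset("Tevatron/wikipedia-wq-corpus", split="train")

# Each example exposes the three string fields declared in _info(): docid, title, text.
example = corpus[0]
print(example["docid"], example["title"])
print(example["text"][:200])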