Xueguang Ma committed
Commit d3ef551
Parent: 0ad18a9
Files changed (3)
  1. .gitattributes +1 -0
  2. corpus.jsonl.gz +3 -0
  3. wikipedia-trivia-corpus.py +89 -0
.gitattributes CHANGED
@@ -25,3 +25,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ corpus.jsonl.gz filter=lfs diff=lfs merge=lfs -text
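The new rule routes corpus.jsonl.gz through the Git LFS filter, so a clone made without LFS fetches only a small text pointer instead of the multi-gigabyte archive. A minimal sketch, under that assumption and using a hypothetical helper name, for telling the two apart before trying to parse the file:

# Hypothetical helper: checks whether a checked-out file is still an LFS pointer.
def is_lfs_pointer(path="corpus.jsonl.gz"):
    with open(path, "rb") as f:
        head = f.read(64)
    # Pointer files are tiny and start with the LFS spec line shown in the next hunk.
    return head.startswith(b"version https://git-lfs.github.com/spec/v1")


print(is_lfs_pointer())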
corpus.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:048fa2fe425ed9a34f16b2c2822eeb9bd6805de42ab69b1c3e3daaed4725755e
+ size 4733423746
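The three added lines are the Git LFS pointer, not the corpus itself: they record the SHA-256 digest and byte size (about 4.7 GB) of the real corpus.jsonl.gz blob. A minimal sketch, assuming a locally downloaded copy at a placeholder path, for checking that the file matches the pointer above:

import hashlib

# Values copied from the LFS pointer above.
EXPECTED_SHA256 = "048fa2fe425ed9a34f16b2c2822eeb9bd6805de42ab69b1c3e3daaed4725755e"
EXPECTED_SIZE = 4733423746


def matches_lfs_pointer(path="corpus.jsonl.gz"):  # path is a placeholder
    """Hash the downloaded archive in chunks and compare size and digest to the pointer."""
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return size == EXPECTED_SIZE and digest.hexdigest() == EXPECTED_SHA256


print(matches_lfs_pointer())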
wikipedia-trivia-corpus.py ADDED
@@ -0,0 +1,89 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """Wikipedia TriviaQA corpus dataset."""
+
+ import json
+
+ import datasets
+
+ _CITATION = """
+ @inproceedings{karpukhin-etal-2020-dense,
+     title = "Dense Passage Retrieval for Open-Domain Question Answering",
+     author = "Karpukhin, Vladimir and Oguz, Barlas and Min, Sewon and Lewis, Patrick and Wu, Ledell and Edunov, Sergey and Chen, Danqi and Yih, Wen-tau",
+     booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
+     month = nov,
+     year = "2020",
+     address = "Online",
+     publisher = "Association for Computational Linguistics",
+     url = "https://www.aclweb.org/anthology/2020.emnlp-main.550",
+     doi = "10.18653/v1/2020.emnlp-main.550",
+     pages = "6769--6781",
+ }
+ """
+
+ _DESCRIPTION = "Dataset loading script for the Wikipedia Trivia corpus."
+
+ _DATASET_URLS = {
+     'train': "https://huggingface.co/datasets/tevatron/wikipedia-trivia/resolve/main/corpus.jsonl.gz",
+ }
+
+
+ class WikipediaTriviaCorpus(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("0.0.1")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(version=VERSION,
+                                description="Wikipedia Trivia corpus"),
+     ]
+
+     def _info(self):
+         # Each corpus entry is a passage with an id, its text, and the source article title.
+         features = datasets.Features(
+             {'docid': datasets.Value('string'), 'text': datasets.Value('string'),
+              'title': datasets.Value('string')},
+         )
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types.
+             features=features,
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license="",
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         # download_and_extract decompresses corpus.jsonl.gz and returns the local JSONL path.
+         downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
+         splits = [
+             datasets.SplitGenerator(
+                 name="train",
+                 gen_kwargs={
+                     "filepath": downloaded_files["train"],
+                 },
+             ),
+         ]
+         return splits
+
+     def _generate_examples(self, filepath):
+         """Yields (docid, example) pairs from the decompressed JSONL corpus."""
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 data = json.loads(line)
+                 yield data['docid'], data
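With the loading script in place, the corpus can be consumed through the datasets library. A minimal usage sketch, assuming the script is published under the tevatron/wikipedia-trivia-corpus repository id (recent datasets releases may also require trust_remote_code=True for script-based datasets); when working from a local checkout, the path to wikipedia-trivia-corpus.py can be passed instead:

from datasets import load_dataset

# Assumed repository id; a local path to wikipedia-trivia-corpus.py also works.
corpus = load_dataset("tevatron/wikipedia-trivia-corpus", split="train")

# Each example exposes the fields declared in _info(): docid, title, text.
first = corpus[0]
print(first["docid"], first["title"])
print(first["text"][:200])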