Xueguang Ma committed on
Commit ad8db44
1 Parent(s): 32eecc7

add msmarco passage

Files changed (4)
  1. .gitattributes +2 -0
  2. dev.jsonl.gz +3 -0
  3. msmarco-passage.py +104 -0
  4. train.jsonl.gz +3 -0
.gitattributes CHANGED
@@ -25,3 +25,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zstandard filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ dev.jsonl.gz filter=lfs diff=lfs merge=lfs -text
+ train.jsonl.gz filter=lfs diff=lfs merge=lfs -text
dev.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d1a66e80d379ccda0f899ae40a1600c99ebdc044838a1a14da01188519c34ad9
+ size 134665
msmarco-passage.py ADDED
@@ -0,0 +1,104 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+
+ # Lint as: python3
+ """MsMarco Passage dataset."""
+
+ import json
+
+ import datasets
+
+ _CITATION = """
+ @misc{bajaj2018ms,
+       title={MS MARCO: A Human Generated MAchine Reading COmprehension Dataset},
+       author={Payal Bajaj and Daniel Campos and Nick Craswell and Li Deng and Jianfeng Gao and Xiaodong Liu
+               and Rangan Majumder and Andrew McNamara and Bhaskar Mitra and Tri Nguyen and Mir Rosenberg and Xia Song
+               and Alina Stoica and Saurabh Tiwary and Tong Wang},
+       year={2018},
+       eprint={1611.09268},
+       archivePrefix={arXiv},
+       primaryClass={cs.CL}
+ }
+ """
+
+ _DESCRIPTION = "dataset load script for MSMARCO Passage"
+
+ _DATASET_URLS = {
+     'train': "https://huggingface.co/datasets/tevatron/msmarco-passage/resolve/main/train.jsonl.gz",
+     'dev': "https://huggingface.co/datasets/tevatron/msmarco-passage/resolve/main/dev.jsonl.gz",
+ }
+
+
+ class MsMarcoPassage(datasets.GeneratorBasedBuilder):
+     VERSION = datasets.Version("0.0.1")
+
+     BUILDER_CONFIGS = [
+         datasets.BuilderConfig(version=VERSION,
+                                description="MS MARCO passage train/dev datasets"),
+     ]
+
+     def _info(self):
+         features = datasets.Features({
+             'query_id': datasets.Value('string'),
+             'query': datasets.Value('string'),
+             'positive_passages': [
+                 {'docid': datasets.Value('string'), 'text': datasets.Value('string')}
+             ],
+             'negative_passages': [
+                 {'docid': datasets.Value('string'), 'text': datasets.Value('string')}
+             ],
+         })
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=features,  # Here we define them above because they are different between the two configurations
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="",
+             # License for the dataset if available
+             license="",
+             # Citation for the dataset
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         downloaded_files = dl_manager.download_and_extract(_DATASET_URLS)
+         splits = [
+             datasets.SplitGenerator(
+                 name="train",
+                 gen_kwargs={
+                     "filepath": downloaded_files["train"],
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name='dev',
+                 gen_kwargs={
+                     "filepath": downloaded_files["dev"],
+                 },
+             ),
+         ]
+         return splits
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         with open(filepath, encoding="utf-8") as f:
+             for line in f:
+                 data = json.loads(line)
+                 if data.get('negative_passages') is None:
+                     data['negative_passages'] = []
+                 if data.get('positive_passages') is None:
+                     data['positive_passages'] = []
+                 yield data['query_id'], data
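
For context on how the script above is consumed, here is a minimal usage sketch. It is not part of this commit; it assumes the script is hosted in the tevatron/msmarco-passage repository named in _DATASET_URLS and that the Hugging Face datasets library is installed.

from datasets import load_dataset

# Loads both splits declared in _split_generators: "train" and "dev".
# Newer releases of the datasets library may also require trust_remote_code=True
# for script-based datasets such as this one.
msmarco = load_dataset("tevatron/msmarco-passage")

example = msmarco["dev"][0]
print(example["query_id"], example["query"])
print(len(example["positive_passages"]), len(example["negative_passages"]))

Each example follows the features declared in _info(): a query_id, a query string, and lists of positive and negative passages, where each passage carries a docid and a text field.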
train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b91ca4c814e0b68b5296c3eb51df4c08ea94e643feaa91983cae732daba1df3b
+ size 1598465575
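
Both train.jsonl.gz and dev.jsonl.gz are stored here as Git LFS pointers; the actual payloads are gzipped JSON Lines files with one record per line. A small sketch of reading one directly, assuming the file has been pulled locally via git lfs:

import gzip
import json

with gzip.open("dev.jsonl.gz", "rt", encoding="utf-8") as f:
    for line in f:
        record = json.loads(line)
        # Mirror _generate_examples: a missing passage list is treated as empty.
        positives = record.get("positive_passages") or []
        negatives = record.get("negative_passages") or []
        print(record["query_id"], len(positives), len(negatives))
        break  # inspect only the first record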