MrLight committed on
Commit
66853f7
1 Parent(s): 2a1b08f

Upload folder using huggingface_hub

Browse files
Files changed (6) hide show
  1. dev.jsonl.gz +3 -0
  2. dl19.jsonl.gz +3 -0
  3. dl20.jsonl.gz +3 -0
  4. msmarco-passage.py +105 -0
  5. prepare_dl.py +18 -0
  6. train.jsonl.gz +3 -0
dev.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d1a66e80d379ccda0f899ae40a1600c99ebdc044838a1a14da01188519c34ad9
3
+ size 134665
dl19.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2ba68c26c3ebd657550e3132226bc3721eb8c78a2706d7bd63ada49fe8b78065
3
+ size 1161
dl20.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:331e829922e2bd8ee1f01766fb7253c0d672ef79498011e91ad5e3e93f17a54a
3
+ size 1492
msmarco-passage.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ # Lint as: python3
17
+ """MsMarco Passage dataset."""
18
+
19
+ import json
20
+
21
+ import datasets
22
+
23
+ _CITATION = """
24
+ @misc{bajaj2018ms,
25
+ title={MS MARCO: A Human Generated MAchine Reading COmprehension Dataset},
26
+ author={Payal Bajaj and Daniel Campos and Nick Craswell and Li Deng and Jianfeng Gao and Xiaodong Liu
27
+ and Rangan Majumder and Andrew McNamara and Bhaskar Mitra and Tri Nguyen and Mir Rosenberg and Xia Song
28
+ and Alina Stoica and Saurabh Tiwary and Tong Wang},
29
+ year={2018},
30
+ eprint={1611.09268},
31
+ archivePrefix={arXiv},
32
+ primaryClass={cs.CL}
33
+ }
34
+ """
35
+
36
+ _DESCRIPTION = "dataset load script for MSMARCO Passage"
37
+
38
+ _DATASET_URLS = {
39
+ 'train': "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/train.jsonl.gz",
40
+ #'train': "https://www.dropbox.com/s/seqqbu90jopvtq5/msmarco_passage_train.json",
41
+ 'dev': "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/dev.jsonl.gz",
42
+ 'dl19': "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/dl19.jsonl.gz",
43
+ 'dl20': "https://huggingface.co/datasets/Tevatron/msmarco-passage/resolve/main/dl20.jsonl.gz",
44
+ }
45
+
46
+
47
class MsMarcoPassage(datasets.GeneratorBasedBuilder):
    """Builder exposing the MS MARCO passage query splits (train/dev/dl19/dl20).

    Each example is one query together with its judged positive passages and
    mined negative passages, loaded from gzipped JSON-lines files.
    """

    VERSION = datasets.Version("0.0.1")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(version=VERSION,
                               description="MS MARCO passage train/dev datasets"),
    ]

    def _info(self):
        """Describe the example schema shared by every split."""
        features = datasets.Features({
            'query_id': datasets.Value('string'),
            'query': datasets.Value('string'),
            'positive_passages': [
                {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
            ],
            'negative_passages': [
                {'docid': datasets.Value('string'), 'title': datasets.Value('string'), 'text': datasets.Value('string')}
            ],
        })
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            # Shared schema defined above — identical for every configuration.
            features=features,
            supervised_keys=None,
            homepage="",
            license="",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Build one SplitGenerator per split.

        User-supplied ``data_files`` take priority; otherwise the hosted
        files in ``_DATASET_URLS`` are downloaded and decompressed.
        """
        if self.config.data_files:
            files_by_split = self.config.data_files
        else:
            files_by_split = dl_manager.download_and_extract(_DATASET_URLS)
        generators = []
        for split_name in files_by_split:
            split_files = files_by_split[split_name]
            # Normalize a single path to a one-element list so
            # _generate_examples can always iterate.
            if isinstance(split_files, str):
                split_files = [split_files]
            generators.append(
                datasets.SplitGenerator(
                    name=split_name,
                    gen_kwargs={"files": split_files},
                )
            )
        return generators

    def _generate_examples(self, files):
        """Yield ``(query_id, example)`` pairs from JSON-lines files.

        Missing or null passage lists are normalized to [] so the schema
        declared in _info always validates.
        """
        for path in files:
            with open(path, encoding="utf-8") as fh:
                for raw_line in fh:
                    example = json.loads(raw_line)
                    for key in ('positive_passages', 'negative_passages'):
                        if example.get(key) is None:
                            example[key] = []
                    yield example['query_id'], example
prepare_dl.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import json


def export_queries(topics, qrels, output_path):
    """Write one JSON line per judged query to *output_path*.

    Args:
        topics: mapping query_id -> topic record; the query text is read from
            the record's 'title' field (pyserini topic format).
        qrels: mapping of query_id -> judgments; only its keys are used, so
            only queries that actually have relevance judgments are exported.
        output_path: destination ``.jsonl`` file path.

    Each record carries empty positive/negative passage lists — judgments are
    applied downstream, not stored here.
    """
    with open(output_path, 'w') as f:
        for query_id in qrels:
            f.write(json.dumps({
                'query_id': str(query_id),
                'query': topics[query_id]['title'],
                'positive_passages': [],
                'negative_passages': [],
            }) + '\n')


if __name__ == '__main__':
    # pyserini is only needed when run as a script; deferring the import keeps
    # export_queries importable (and testable) without it.
    from pyserini.search import get_topics, get_qrels

    # Was hard-coded inline twice; change in one place for dl20 etc.
    topic_set = 'dl19-passage'
    export_queries(get_topics(topic_set), get_qrels(topic_set), 'dl19.jsonl')
train.jsonl.gz ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:72430ded5e32476a11eaef5dcd0950e2cf564bf8de1f3625d7be0588eaaceee8
3
+ size 2155342997