Datasets: asnq

Modalities: Text
Formats: parquet
Languages: English
ArXiv: 1911.04118
Libraries: Datasets, Dask
License: unknown
Commit 134d7a3 (root commit, 0 parents), committed by system (HF staff)

Update files from the datasets library (from 1.1.3)


Release notes: https://github.com/huggingface/datasets/releases/tag/1.1.3

Files changed (4):
  1. .gitattributes (+27, -0)
  2. asnq.py (+151, -0)
  3. dataset_infos.json (+1, -0)
  4. dummy/1.0.0/dummy_data.zip (+3, -0)
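
Once this commit is live on the Hub, the asnq.py script below is consumed through the datasets library. A minimal usage sketch (the dataset id "asnq" is an assumption based on the builder name in the script; the first call downloads the ~3.6 GB archive recorded in dataset_infos.json):

from datasets import load_dataset

# Downloads and extracts the ASNQ archive on the first call;
# later calls reuse the local cache.
ds = load_dataset("asnq")

# Each row has the five features defined in asnq.py below.
print(ds["train"][0])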
.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
asnq.py ADDED
@@ -0,0 +1,151 @@
+ # coding=utf-8
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """Answer-Sentence Natural Questions (ASNQ)
+
+ ASNQ is a dataset for answer sentence selection derived from Google's
+ Natural Questions (NQ) dataset (Kwiatkowski et al. 2019). It converts
+ NQ into an AS2 (answer sentence selection) format.
+
+ The dataset details can be found in the paper at
+ https://arxiv.org/abs/1911.04118
+
+ The dataset can be downloaded at
+ https://wqa-public.s3.amazonaws.com/tanda-aaai-2020/data/asnq.tar
+ """
+
+ from __future__ import absolute_import, division, print_function
+
+ import csv
+ import os
+
+ import datasets
+
+
+ _CITATION = """\
+ @article{garg2019tanda,
+     title={TANDA: Transfer and Adapt Pre-Trained Transformer Models for Answer Sentence Selection},
+     author={Siddhant Garg and Thuy Vu and Alessandro Moschitti},
+     year={2019},
+     eprint={1911.04118},
+ }
+ """
+
+ _DESCRIPTION = """\
+ ASNQ is a dataset for answer sentence selection derived from
+ Google's Natural Questions (NQ) dataset (Kwiatkowski et al. 2019).
+
+ Each example contains a question, candidate sentence, label indicating whether or not
+ the sentence answers the question, and two additional features --
+ sentence_in_long_answer and short_answer_in_sentence indicating whether or not the
+ candidate sentence is contained in the long_answer and if the short_answer is in the candidate sentence.
+
+ For more details please see
+ https://arxiv.org/pdf/1911.04118.pdf
+
+ and
+
+ https://research.google/pubs/pub47761/
+ """
+
+ _URL = "https://wqa-public.s3.amazonaws.com/tanda-aaai-2020/data/asnq.tar"
+
+
+ class ASNQ(datasets.GeneratorBasedBuilder):
+     """ASNQ is a dataset for answer sentence selection derived from
+     Google's Natural Questions (NQ) dataset (Kwiatkowski et al. 2019).
+
+     The dataset details can be found in the paper:
+     https://arxiv.org/abs/1911.04118
+     """
+
+     VERSION = datasets.Version("1.0.0")
+
+     def _info(self):
+
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # This defines the different columns of the dataset and their types
+             features=datasets.Features(
+                 {
+                     "question": datasets.Value("string"),
+                     "sentence": datasets.Value("string"),
+                     "label": datasets.ClassLabel(names=["neg", "pos"]),
+                     "sentence_in_long_answer": datasets.Value("bool"),
+                     "short_answer_in_sentence": datasets.Value("bool"),
+                 }
+             ),
+             # No default supervised_keys
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://github.com/alexa/wqa_tanda#answer-sentence-natural-questions-asnq",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         dl_dir = dl_manager.download_and_extract(_URL)
+         data_dir = os.path.join(dl_dir, "data", "asnq")
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "train.tsv"),
+                     "split": "train",
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 # These kwargs will be passed to _generate_examples
+                 gen_kwargs={
+                     "filepath": os.path.join(data_dir, "dev.tsv"),
+                     "split": "dev",
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, filepath, split):
+         """Yields examples.
+
+         The original dataset contains labels '1', '2', '3' and '4', with labels
+         '1', '2' and '3' considered negative (the sentence does not answer the question),
+         and label '4' considered positive (the sentence does answer the question).
+         We map these labels to two classes, returning the other properties as additional
+         features."""
+
+         # Mapping of the dataset's original labels to a tuple of
+         # (label, sentence_in_long_answer, short_answer_in_sentence)
+         label_map = {
+             "1": ("neg", False, False),
+             "2": ("neg", False, True),
+             "3": ("neg", True, False),
+             "4": ("pos", True, True),
+         }
+         with open(filepath, encoding="utf-8") as tsvfile:
+             tsvreader = csv.reader(tsvfile, delimiter="\t")
+             for id_, row in enumerate(tsvreader):
+                 question, sentence, orig_label = row
+                 label, sentence_in_long_answer, short_answer_in_sentence = label_map[orig_label]
+                 yield id_, {
+                     "question": question,
+                     "sentence": sentence,
+                     "label": label,
+                     "sentence_in_long_answer": sentence_in_long_answer,
+                     "short_answer_in_sentence": short_answer_in_sentence,
+                 }
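
To make the label mapping in _generate_examples concrete, here is a small self-contained sketch that applies the same label_map to rows in the train.tsv format (question, sentence, label, tab-separated). The two rows are hypothetical stand-ins, not taken from the real data:

import csv
import io

# Same mapping as in the script above:
# original label -> (label, sentence_in_long_answer, short_answer_in_sentence)
label_map = {
    "1": ("neg", False, False),
    "2": ("neg", False, True),
    "3": ("neg", True, False),
    "4": ("pos", True, True),
}

# Two hypothetical rows: question \t sentence \t original label
tsv = (
    "who wrote hamlet\tHamlet was written by William Shakespeare.\t4\n"
    "who wrote hamlet\tHamlet is a tragedy set in Denmark.\t1\n"
)

for question, sentence, orig_label in csv.reader(io.StringIO(tsv), delimiter="\t"):
    label, in_long_answer, short_in_sentence = label_map[orig_label]
    print(label, in_long_answer, short_in_sentence)
# pos True True
# neg False False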
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"default": {"description": "ASNQ is a dataset for answer sentence selection derived from\nGoogle's Natural Questions (NQ) dataset (Kwiatkowski et al. 2019).\n\nEach example contains a question, candidate sentence, label indicating whether or not\nthe sentence answers the question, and two additional features -- \nsentence_in_long_answer and short_answer_in_sentence indicating whether or not the \ncandidate sentence is contained in the long_answer and if the short_answer is in the candidate sentence.\n\nFor more details please see \nhttps://arxiv.org/pdf/1911.04118.pdf\n\nand \n\nhttps://research.google/pubs/pub47761/\n", "citation": "@article{garg2019tanda,\n title={TANDA: Transfer and Adapt Pre-Trained Transformer Models for Answer Sentence Selection},\n author={Siddhant Garg and Thuy Vu and Alessandro Moschitti},\n year={2019},\n eprint={1911.04118},\n}\n", "homepage": "https://github.com/alexa/wqa_tanda#answer-sentence-natural-questions-asnq", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["neg", "pos"], "names_file": null, "id": null, "_type": "ClassLabel"}, "sentence_in_long_answer": {"dtype": "bool", "id": null, "_type": "Value"}, "short_answer_in_sentence": {"dtype": "bool", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "builder_name": "asnq", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3656881376, "num_examples": 20377568, "dataset_name": "asnq"}, "validation": {"name": "validation", "num_bytes": 168005155, "num_examples": 930062, "dataset_name": "asnq"}}, "download_checksums": {"https://wqa-public.s3.amazonaws.com/tanda-aaai-2020/data/asnq.tar": {"num_bytes": 3563857920, "checksum": "4211d3e507e7cfa345a9eea3c5222b7d79fd963cf27407555c5558c37344ddf1"}}, "download_size": 3563857920, "post_processing_size": null, "dataset_size": 3824886531, "size_in_bytes": 7388744451}}
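
Per this metadata, the processed dataset has 20,377,568 train examples and 930,062 validation examples, and "label" is stored as a two-class ClassLabel. A short inspection sketch (again assuming the dataset loads under the id "asnq"):

from datasets import load_dataset

ds = load_dataset("asnq")

# Splits and row counts come from dataset_infos.json:
# train: 20,377,568 examples; validation: 930,062 examples.
print(ds)

# ClassLabel stores labels as integers; .names and .int2str
# recover the human-readable class names.
label_feature = ds["train"].features["label"]
print(label_feature.names)       # ['neg', 'pos']
print(label_feature.int2str(1))  # 'pos'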
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89aaa94ab61bc915801e4e404a60de81cf58d579f59b40aae56cfa5d70f3b9a3
+ size 2926