Datasets: asnq
Languages: English
Multilinguality: monolingual
Size Categories: 10M<n<100M
Language Creators: found
Annotations Creators: crowdsourced
ArXiv: 1911.04118
albertvillanova (HF staff) committed
Commit e12fccd
1 Parent(s): 7c144b1

Delete loading script

Files changed (1):
  1. asnq.py +0 -151
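
Note: with the loading script deleted, the dataset is presumably served directly from data files hosted in the repository (the standard script-to-data-files migration on the Hub) rather than by executing asnq.py. A minimal sketch of loading it afterwards, assuming the repository id "asnq" and that the converted data keeps the schema and splits defined by the deleted script below:

    from datasets import load_dataset

    # Loads ASNQ from the Hub's data files; no repository code is executed.
    ds = load_dataset("asnq")

    # Splits and schema as defined by the deleted script: "train" and
    # "validation", with question, sentence, label (neg/pos),
    # sentence_in_long_answer and short_answer_in_sentence columns.
    print(ds)
    print(ds["train"].features["label"].names)  # ['neg', 'pos']
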
asnq.py DELETED
@@ -1,151 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """Answer-Sentence Natural Questions (ASNQ)
-
- ASNQ is a dataset for answer sentence selection derived from Google's
- Natural Questions (NQ) dataset (Kwiatkowski et al. 2019). It converts
- the NQ dataset into an AS2 (answer sentence selection) format.
-
- The dataset details can be found in the paper at
- https://arxiv.org/abs/1911.04118
-
- The dataset can be downloaded at
- https://d3t7erp6ge410c.cloudfront.net/tanda-aaai-2020/data/asnq.tar
- """
-
-
- import csv
- import os
-
- import datasets
-
-
- _CITATION = """\
- @article{garg2019tanda,
-     title={TANDA: Transfer and Adapt Pre-Trained Transformer Models for Answer Sentence Selection},
-     author={Siddhant Garg and Thuy Vu and Alessandro Moschitti},
-     year={2019},
-     eprint={1911.04118},
- }
- """
-
- _DESCRIPTION = """\
- ASNQ is a dataset for answer sentence selection derived from
- Google's Natural Questions (NQ) dataset (Kwiatkowski et al. 2019).
-
- Each example contains a question, a candidate sentence, a label indicating
- whether or not the sentence answers the question, and two additional
- features -- sentence_in_long_answer and short_answer_in_sentence --
- indicating whether the candidate sentence is contained in the long_answer
- and whether the short_answer is contained in the candidate sentence.
-
- For more details please see
- https://arxiv.org/pdf/1911.04118.pdf
-
- and
-
- https://research.google/pubs/pub47761/
- """
-
- _URL = "data/asnq.zip"
-
-
- class ASNQ(datasets.GeneratorBasedBuilder):
-     """ASNQ is a dataset for answer sentence selection derived from
-     Google's Natural Questions (NQ) dataset (Kwiatkowski et al. 2019).
-
-     The dataset details can be found in the paper:
-     https://arxiv.org/abs/1911.04118
-     """
-
-     VERSION = datasets.Version("1.0.0")
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
-             description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=datasets.Features(
-                 {
-                     "question": datasets.Value("string"),
-                     "sentence": datasets.Value("string"),
-                     "label": datasets.ClassLabel(names=["neg", "pos"]),
-                     "sentence_in_long_answer": datasets.Value("bool"),
-                     "short_answer_in_sentence": datasets.Value("bool"),
-                 }
-             ),
-             # No default supervised_keys
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
-             homepage="https://github.com/alexa/wqa_tanda#answer-sentence-natural-questions-asnq",
-             citation=_CITATION,
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-         # dl_manager is a datasets.download.DownloadManager that can be used to
-         # download and extract URLs
-         dl_dir = dl_manager.download_and_extract(_URL)
-         data_dir = os.path.join(dl_dir, "data", "asnq")
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "train.tsv"),
-                     "split": "train",
-                 },
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 # These kwargs will be passed to _generate_examples
-                 gen_kwargs={
-                     "filepath": os.path.join(data_dir, "dev.tsv"),
-                     "split": "dev",
-                 },
-             ),
-         ]
-
-     def _generate_examples(self, filepath, split):
-         """Yields examples.
-
-         The original dataset contains labels '1', '2', '3' and '4', with labels
-         '1', '2' and '3' considered negative (sentence does not answer the question),
-         and label '4' considered positive (sentence does answer the question).
-         We map these labels to two classes, returning the other properties as additional
-         features."""
-
-         # Mapping of the dataset's original labels to a tuple of
-         # (label, sentence_in_long_answer, short_answer_in_sentence)
-         label_map = {
-             "1": ("neg", False, False),
-             "2": ("neg", False, True),
-             "3": ("neg", True, False),
-             "4": ("pos", True, True),
-         }
-         with open(filepath, encoding="utf-8") as tsvfile:
-             tsvreader = csv.reader(tsvfile, delimiter="\t")
-             for id_, row in enumerate(tsvreader):
-                 question, sentence, orig_label = row
-                 label, sentence_in_long_answer, short_answer_in_sentence = label_map[orig_label]
-                 yield id_, {
-                     "question": question,
-                     "sentence": sentence,
-                     "label": label,
-                     "sentence_in_long_answer": sentence_in_long_answer,
-                     "short_answer_in_sentence": short_answer_in_sentence,
-                 }
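
For reference, anyone still working from the original asnq.tar release (linked in the deleted docstring) can reproduce the script's label handling without the datasets builder machinery. A minimal standalone sketch of the same mapping; the read_asnq_tsv name and the file path are illustrative, not part of any released API:

    import csv

    # Mirrors the deleted script: raw labels '1'-'3' are negatives and '4'
    # is the only positive, with the tuple encoding
    # (label, sentence_in_long_answer, short_answer_in_sentence).
    LABEL_MAP = {
        "1": ("neg", False, False),
        "2": ("neg", False, True),
        "3": ("neg", True, False),
        "4": ("pos", True, True),
    }

    def read_asnq_tsv(filepath):
        # Each row of the raw TSV is (question, sentence, original label).
        with open(filepath, encoding="utf-8") as tsvfile:
            for question, sentence, orig_label in csv.reader(tsvfile, delimiter="\t"):
                label, in_long, short_in = LABEL_MAP[orig_label]
                yield {
                    "question": question,
                    "sentence": sentence,
                    "label": label,
                    "sentence_in_long_answer": in_long,
                    "short_answer_in_sentence": short_in,
                }

    # Example: keep only sentences that answer the question.
    positives = [ex for ex in read_asnq_tsv("asnq/train.tsv") if ex["label"] == "pos"]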