albertvillanova HF staff commited on
Commit
32291fc
1 Parent(s): 772b8e0

Convert dataset to Parquet (#5)

Browse files

- Convert dataset to Parquet (7c144b15ac9d1712b2b688c80f833a4e51bf5d49)
- Delete legacy dataset_infos.json (c7871a3583257342d8c831236e6dc01365b4feb3)
- Delete loading script (e12fccdedc661bdf6a5950e304501db04f02fc9b)
- Delete data folder (7c5b4cdb6ffa402a54b8c6e5b36297bca673c99d)
- Revert "Delete data folder" (aaf703d4b04c9ca03e063418baf5c15474b878a6)
- Delete data file (1cdc2ed4acbd99bb5631b596ab13190bbbbe6537)

README.md CHANGED
@@ -42,8 +42,15 @@ dataset_info:
42
  - name: validation
43
  num_bytes: 168004403
44
  num_examples: 930062
45
- download_size: 1482064429
46
  dataset_size: 3824869475
 
 
 
 
 
 
 
47
  ---
48
 
49
  # Dataset Card for "asnq"
42
  - name: validation
43
  num_bytes: 168004403
44
  num_examples: 930062
45
+ download_size: 2496835395
46
  dataset_size: 3824869475
47
+ configs:
48
+ - config_name: default
49
+ data_files:
50
+ - split: train
51
+ path: data/train-*
52
+ - split: validation
53
+ path: data/validation-*
54
  ---
55
 
56
  # Dataset Card for "asnq"
asnq.py DELETED
@@ -1,151 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- """Answer-Sentence Natural Questions (ASNQ)
16
-
17
- ASNQ is a dataset for answer sentence selection derived from Google's
18
- Natural Questions (NQ) dataset (Kwiatkowski et al. 2019). It converts
19
- NQ's dataset into an AS2 (answer-sentence-selection) format.
20
-
21
- The dataset details can be found in the paper at
22
- https://arxiv.org/abs/1911.04118
23
-
24
- The dataset can be downloaded at
25
- https://d3t7erp6ge410c.cloudfront.net/tanda-aaai-2020/data/asnq.tar
26
-
27
- """
28
-
29
-
30
- import csv
31
- import os
32
-
33
- import datasets
34
-
35
-
36
- _CITATION = """\
37
- @article{garg2019tanda,
38
- title={TANDA: Transfer and Adapt Pre-Trained Transformer Models for Answer Sentence Selection},
39
- author={Siddhant Garg and Thuy Vu and Alessandro Moschitti},
40
- year={2019},
41
- eprint={1911.04118},
42
- }
43
- """
44
-
45
- _DESCRIPTION = """\
46
- ASNQ is a dataset for answer sentence selection derived from
47
- Google's Natural Questions (NQ) dataset (Kwiatkowski et al. 2019).
48
-
49
- Each example contains a question, candidate sentence, label indicating whether or not
50
- the sentence answers the question, and two additional features --
51
- sentence_in_long_answer and short_answer_in_sentence indicating whether or not the
52
- candidate sentence is contained in the long_answer and if the short_answer is in the candidate sentence.
53
-
54
- For more details please see
55
- https://arxiv.org/pdf/1911.04118.pdf
56
-
57
- and
58
-
59
- https://research.google/pubs/pub47761/
60
- """
61
-
62
- _URL = "data/asnq.zip"
63
-
64
-
65
- class ASNQ(datasets.GeneratorBasedBuilder):
66
- """ASNQ is a dataset for answer sentence selection derived
67
- ASNQ is a dataset for answer sentence selection derived from
68
- Google's Natural Questions (NQ) dataset (Kwiatkowski et al. 2019).
69
-
70
- The dataset details can be found in the paper:
71
- https://arxiv.org/abs/1911.04118
72
- """
73
-
74
- VERSION = datasets.Version("1.0.0")
75
-
76
- def _info(self):
77
-
78
- return datasets.DatasetInfo(
79
- # This is the description that will appear on the datasets page.
80
- description=_DESCRIPTION,
81
- # This defines the different columns of the dataset and their types
82
- features=datasets.Features(
83
- {
84
- "question": datasets.Value("string"),
85
- "sentence": datasets.Value("string"),
86
- "label": datasets.ClassLabel(names=["neg", "pos"]),
87
- "sentence_in_long_answer": datasets.Value("bool"),
88
- "short_answer_in_sentence": datasets.Value("bool"),
89
- }
90
- ),
91
- # No default supervised_keys
92
- supervised_keys=None,
93
- # Homepage of the dataset for documentation
94
- homepage="https://github.com/alexa/wqa_tanda#answer-sentence-natural-questions-asnq",
95
- citation=_CITATION,
96
- )
97
-
98
- def _split_generators(self, dl_manager):
99
- """Returns SplitGenerators."""
100
- # dl_manager is a datasets.download.DownloadManager that can be used to
101
- # download and extract URLs
102
- dl_dir = dl_manager.download_and_extract(_URL)
103
- data_dir = os.path.join(dl_dir, "data", "asnq")
104
- return [
105
- datasets.SplitGenerator(
106
- name=datasets.Split.TRAIN,
107
- # These kwargs will be passed to _generate_examples
108
- gen_kwargs={
109
- "filepath": os.path.join(data_dir, "train.tsv"),
110
- "split": "train",
111
- },
112
- ),
113
- datasets.SplitGenerator(
114
- name=datasets.Split.VALIDATION,
115
- # These kwargs will be passed to _generate_examples
116
- gen_kwargs={
117
- "filepath": os.path.join(data_dir, "dev.tsv"),
118
- "split": "dev",
119
- },
120
- ),
121
- ]
122
-
123
- def _generate_examples(self, filepath, split):
124
- """Yields examples.
125
-
126
- Original dataset contains labels '1', '2', '3' and '4', with labels
127
- '1', '2' and '3' considered negative (sentence does not answer the question),
128
- and label '4' considered positive (sentence does answer the question).
129
- We map these labels to two classes, returning the other properties as additional
130
- features."""
131
-
132
- # Mapping of dataset's original labels to a tuple of
133
- # (label, sentence_in_long_answer, short_answer_in_sentence)
134
- label_map = {
135
- "1": ("neg", False, False),
136
- "2": ("neg", False, True),
137
- "3": ("neg", True, False),
138
- "4": ("pos", True, True),
139
- }
140
- with open(filepath, encoding="utf-8") as tsvfile:
141
- tsvreader = csv.reader(tsvfile, delimiter="\t")
142
- for id_, row in enumerate(tsvreader):
143
- question, sentence, orig_label = row
144
- label, sentence_in_long_answer, short_answer_in_sentence = label_map[orig_label]
145
- yield id_, {
146
- "question": question,
147
- "sentence": sentence,
148
- "label": label,
149
- "sentence_in_long_answer": sentence_in_long_answer,
150
- "short_answer_in_sentence": short_answer_in_sentence,
151
- }
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
data/{asnq.zip → train-00000-of-00008.parquet} RENAMED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:4d1b3ee889be3d2a090d075a7494cee339f6fcfae6bc16ce5d06ad66874e591c
3
- size 1482064429
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c51e6cd0278c630e2094d9cdd5502fb3f35e60e260477a969832b8422cf5574e
3
+ size 299139770
data/train-00001-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b74a1689c47410893d8e944a994bd8e610ab173cb9e9255e225d1777b7d0859a
3
+ size 298212695
data/train-00002-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f9dd2783190464db93960c13fd8e99e949b52add2bdb4b5a9267e8601cfaef53
3
+ size 299688447
data/train-00003-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7ff76549b7fc5e055419e274a1b28a277ac2742ed15298f47ee287530323407
3
+ size 299173033
data/train-00004-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1baa386b5d338dea200eec4aebf3f553bf5591b059a0efbf587857c8713f3e95
3
+ size 299751866
data/train-00005-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:15c4c802114cfd6db510dbcecd5155c58b92758b06c01fd91fe94bfcb411b9cb
3
+ size 299035267
data/train-00006-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ce69b66214e21c3330a1836e652742876615e462e59b973fdfd89ee21c38303b
3
+ size 299086683
data/train-00007-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b090f7299765d9101cc87ec2b29b05b13e96acd6e6717c91282b06f0e606e567
3
+ size 299236601
data/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c400a3e7bbd1b1f184bbc3bda3747b3946ccfa29e76c7ce2841613601515fed6
3
+ size 103511033
dataset_infos.json DELETED
@@ -1 +0,0 @@
1
- {"default": {"description": "ASNQ is a dataset for answer sentence selection derived from\nGoogle's Natural Questions (NQ) dataset (Kwiatkowski et al. 2019).\n\nEach example contains a question, candidate sentence, label indicating whether or not\nthe sentence answers the question, and two additional features --\nsentence_in_long_answer and short_answer_in_sentence indicating whether or not the\ncandidate sentence is contained in the long_answer and if the short_answer is in the candidate sentence.\n\nFor more details please see\nhttps://arxiv.org/pdf/1911.04118.pdf\n\nand\n\nhttps://research.google/pubs/pub47761/\n", "citation": "@article{garg2019tanda,\n title={TANDA: Transfer and Adapt Pre-Trained Transformer Models for Answer Sentence Selection},\n author={Siddhant Garg and Thuy Vu and Alessandro Moschitti},\n year={2019},\n eprint={1911.04118},\n}\n", "homepage": "https://github.com/alexa/wqa_tanda#answer-sentence-natural-questions-asnq", "license": "", "features": {"question": {"dtype": "string", "id": null, "_type": "Value"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "label": {"num_classes": 2, "names": ["neg", "pos"], "id": null, "_type": "ClassLabel"}, "sentence_in_long_answer": {"dtype": "bool", "id": null, "_type": "Value"}, "short_answer_in_sentence": {"dtype": "bool", "id": null, "_type": "Value"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "asnq", "config_name": "default", "version": {"version_str": "1.0.0", "description": null, "major": 1, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 3656865072, "num_examples": 20377568, "dataset_name": "asnq"}, "validation": {"name": "validation", "num_bytes": 168004403, "num_examples": 930062, "dataset_name": "asnq"}}, "download_checksums": {"data/asnq.zip": {"num_bytes": 1482064429, "checksum": "4d1b3ee889be3d2a090d075a7494cee339f6fcfae6bc16ce5d06ad66874e591c"}}, "download_size": 1482064429, "post_processing_size": 
null, "dataset_size": 3824869475, "size_in_bytes": 5306933904}}