Languages: English
Multilinguality: monolingual
Size Categories: 1M<n<10M
Language Creators: machine-generated
Annotations Creators: expert-generated
Source Datasets: original
system (HF staff) committed
Commit ca81e23
1 Parent(s): f6d6f92

Update files from the datasets library (from 1.12.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.12.0

README.md CHANGED
@@ -18,6 +18,7 @@ task_categories:
 task_ids:
 - open-domain-qa
 paperswithcode_id: gooaq
+pretty_name: 'GooAQ: Open Question Answering with Diverse Answer Types'
 ---
 
 # Dataset Card for GooAQ
@@ -108,11 +109,14 @@ Here is the dominant types in the current dataset:
 
 ### Data Splits
 
-This dataset is split into train set. Number of samples in train set is given below:
+Number of samples in train/validation/test set are given below:
+
+| Split      | Number of samples |
+|------------|-------------------|
+| Train      | 3112679           |
+| Validation | 2500              |
+| Test       | 2500              |
 
-|       | Train  |
-| ----- | ------ |
-| Gooaq | 5030530|
 
 ## Dataset Creation
 
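As a quick sanity check on the updated card, here is a minimal sketch (assuming `datasets` 1.12.0 or later) of loading GooAQ and confirming that the split sizes match the new table:

```python
from datasets import load_dataset

# Downloads roughly 2 GB of source data on first use.
gooaq = load_dataset("gooaq")

# Expected after this commit: train=3112679, validation=2500, test=2500.
for name, split in gooaq.items():
    print(name, len(split))

# Each example carries: id, question, short_answer, answer, answer_type.
print(gooaq["validation"][0])
```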
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"default": {"description": "GooAQ is a large-scale dataset with a variety of answer types. This dataset contains over\n5 million questions and 3 million answers collected from Google. GooAQ questions are collected\nsemi-automatically from the Google search engine using its autocomplete feature. This results in\nnaturalistic questions of practical interest that are nonetheless short and expressed using simple\nlanguage. GooAQ answers are mined from Google's responses to our collected questions, specifically from\nthe answer boxes in the search results. This yields a rich space of answer types, containing both\ntextual answers (short and long) as well as more structured ones such as collections.\n", "citation": "@article{gooaq2021,\n title={GooAQ: Open Question Answering with Diverse Answer Types},\n author={Khashabi, Daniel and Ng, Amos and Khot, Tushar and Sabharwal, Ashish and Hajishirzi, Hannaneh and Callison-Burch, Chris},\n journal={arXiv preprint},\n year={2021}\n}\n", "homepage": "https://github.com/allenai/gooaq", "license": "Licensed under the Apache License, Version 2.0", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "short_answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_type": {"num_classes": 6, "names": ["feat_snip", "collection", "knowledge", "unit_conv", "time_conv", "curr_conv"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "builder_name": "gooaq", "config_name": "default", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 1102827066, "num_examples": 5030530, "dataset_name": "gooaq"}}, "download_checksums": {"https://github.com/allenai/gooaq/raw/main/data/qoogle.jsonl": {"num_bytes": 1467162788, "checksum": "7c57029dbac90db21c7abcb3dcdbf9cd9f83f9a1d24815a2d8c0663fe13e4a17"}}, "download_size": 1467162788, "post_processing_size": null, "dataset_size": 1102827066, "size_in_bytes": 2569989854}}
+ {"default": {"description": "GooAQ is a large-scale dataset with a variety of answer types. This dataset contains over\n5 million questions and 3 million answers collected from Google. GooAQ questions are collected\nsemi-automatically from the Google search engine using its autocomplete feature. This results in\nnaturalistic questions of practical interest that are nonetheless short and expressed using simple\nlanguage. GooAQ answers are mined from Google's responses to our collected questions, specifically from\nthe answer boxes in the search results. This yields a rich space of answer types, containing both\ntextual answers (short and long) as well as more structured ones such as collections.\n", "citation": "@article{gooaq2021,\n title={GooAQ: Open Question Answering with Diverse Answer Types},\n author={Khashabi, Daniel and Ng, Amos and Khot, Tushar and Sabharwal, Ashish and Hajishirzi, Hannaneh and Callison-Burch, Chris},\n journal={arXiv preprint},\n year={2021}\n}\n", "homepage": "https://github.com/allenai/gooaq", "license": "Licensed under the Apache License, Version 2.0", "features": {"id": {"dtype": "int32", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "short_answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer": {"dtype": "string", "id": null, "_type": "Value"}, "answer_type": {"num_classes": 6, "names": ["feat_snip", "collection", "knowledge", "unit_conv", "time_conv", "curr_conv"], "names_file": null, "id": null, "_type": "ClassLabel"}}, "post_processed": null, "supervised_keys": null, "task_templates": null, "builder_name": "gooaq", "config_name": "default", "version": {"version_str": "1.2.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 974320061, "num_examples": 3112679, "dataset_name": "gooaq"}, "validation": {"name": "validation", "num_bytes": 444553, "num_examples": 2500, "dataset_name": "gooaq"}, "test": {"name": "test", "num_bytes": 445810, "num_examples": 2500, "dataset_name": "gooaq"}}, "download_checksums": {"https://github.com/allenai/gooaq/raw/main/data/gooaq.jsonl": {"num_bytes": 1920133810, "checksum": "d68007293be8740a7a7388efa8ea30ae5a3232d18a340a63f0190e07942d9da2"}, "https://github.com/allenai/gooaq/raw/main/data/split.json": {"num_bytes": 191225091, "checksum": "728921af66afb7b2c04466795e595586ad1f92bbd15a879b47fc59aaca8826db"}}, "download_size": 2111358901, "post_processing_size": null, "dataset_size": 975210424, "size_in_bytes": 3086569325}}
dummy/{1.1.0 → 1.2.0}/dummy_data.zip RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:996f63a67e25d9b12a5f5c441d638071a9d198e6915f658cb1550ab4361a1e0b
-size 428
+oid sha256:81aefd9dcc56e7960bb1b33004b01858a5147e1eb621ce2deb41c9e92e0fef51
+size 2296
gooaq.py CHANGED
@@ -17,6 +17,8 @@
 
 import json
 
+import numpy as np
+
 import datasets
 
 
@@ -43,13 +45,15 @@ _HOMEPAGE = "https://github.com/allenai/gooaq"
 
 _LICENSE = "Licensed under the Apache License, Version 2.0"
 
-_URL = "https://github.com/allenai/gooaq/raw/main/data/qoogle.jsonl"
+_URL = "https://github.com/allenai/gooaq/raw/main/data/gooaq.jsonl"
+
+_SPLITS_URL = "https://github.com/allenai/gooaq/raw/main/data/split.json"
 
 
 class Gooaq(datasets.GeneratorBasedBuilder):
     """GooAQ - Question-answers, collected from Google"""
 
-    VERSION = datasets.Version("1.1.0")
+    VERSION = datasets.Version("1.2.0")
 
     def _info(self):
         features = datasets.Features(
@@ -83,39 +87,70 @@ class Gooaq(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
 
-        data_dir = dl_manager.download(_URL)
+        data = dl_manager.download(_URL)
+        splits = dl_manager.download(_SPLITS_URL)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": data_dir,
+                    "filepath": data,
                     "split": "train",
+                    "split_file": splits,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.VALIDATION,
+                gen_kwargs={
+                    "filepath": data,
+                    "split": "dev",
+                    "split_file": splits,
+                },
+            ),
+            datasets.SplitGenerator(
+                name=datasets.Split.TEST,
+                gen_kwargs={
+                    "filepath": data,
+                    "split": "test",
+                    "split_file": splits,
                 },
             ),
         ]
 
     def _generate_examples(
-        self, filepath, split  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
+        self,
+        filepath,
+        split,
+        split_file,  # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
    ):
        dominant_classes = ["feat_snip", "collection", "knowledge", "unit_conv", "time_conv", "curr_conv"]
 
+        with open(split_file, encoding="utf-8") as f_split:
+            if split == "train":
+                split_ids = json.load(f_split)[split]
+                split_ids = np.array(split_ids)[:, 0]
+            else:
+                split_ids = json.load(f_split)[split]
+
+        split_ids = set(split_ids)
+
         with open(filepath, encoding="utf-8") as f:
             for id_, row in enumerate(f):
                 data = json.loads(row)
 
-                if data["answer_type"] not in dominant_classes:
-                    yield id_, {
-                        "id": data["id"],
-                        "question": data["question"],
-                        "short_answer": data["short_answer"],
-                        "answer": data["answer"],
-                        "answer_type": -1,
-                    }
-                else:
-                    yield id_, {
-                        "id": data["id"],
-                        "question": data["question"],
-                        "short_answer": data["short_answer"],
-                        "answer": data["answer"],
-                        "answer_type": data["answer_type"],
-                    }
+                if data["id"] in split_ids:
+                    if data["answer_type"] not in dominant_classes:
+                        yield id_, {
+                            "id": data["id"],
+                            "question": data["question"],
+                            "short_answer": data["short_answer"],
+                            "answer": data["answer"],
+                            "answer_type": -1,
+                        }
+                    else:
+                        yield id_, {
+                            "id": data["id"],
+                            "question": data["question"],
+                            "short_answer": data["short_answer"],
+                            "answer": data["answer"],
+                            "answer_type": data["answer_type"],
+                        }
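For readers following the new split logic, here is a short, self-contained sketch of the id filtering that `_generate_examples` now performs. The structure of `split.json` shown here is only inferred from the code above (train entries appear to be rows whose first column is the question id, while dev/test are flat id lists); the real file may differ.

```python
import numpy as np

# Hypothetical miniature stand-in for split.json, inferred from the loader:
# "train" rows carry the question id in their first column, while "dev" and
# "test" are plain lists of ids. This layout is an assumption, not the real file.
toy_split = {
    "train": [[101, 0], [102, 0], [103, 1]],
    "dev": [104, 105],
    "test": [106],
}


def select_split_ids(split_dict, split):
    """Mirror the id-selection logic from Gooaq._generate_examples."""
    if split == "train":
        # Keep only the first column (the question id) of each train row.
        ids = np.array(split_dict[split])[:, 0]
    else:
        ids = split_dict[split]
    return set(ids)


train_ids = select_split_ids(toy_split, "train")
print(104 in train_ids, 102 in train_ids)  # False True
```

Only rows of gooaq.jsonl whose `id` appears in the selected set are yielded for a given split, which is why the new train split (3112679 examples) is smaller than the full 5-million-question file.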