frankaging committed
Commit 8910c0a
Parent: 9f36bd8

dynasent-v1.1-round1

Files changed (2):
  1. dataset_infos.json +1 -0
  2. dynasent.py +123 -92
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"dynabench.dynasent.r1.all": {"description": "DynaSent is an English-language benchmark task for ternary\n (positive/negative/neutral) sentiment analysis.\n For more details on the dataset construction process,\n see https://github.com/cgpotts/dynasent.", "citation": "@article{\n potts-etal-2020-dynasent,\n title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},\n author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus\n and Kiela, Douwe},\n journal={arXiv preprint arXiv:2012.15349},\n url={https://arxiv.org/abs/2012.15349},\n year={2020}\n }", "homepage": "https://dynabench.org/tasks/3", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "hit_ids": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "indices_into_review_text": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "model_0_label": {"dtype": "string", "id": null, "_type": "Value"}, "model_0_probs": {"negative": {"dtype": "float32", "id": null, "_type": "Value"}, "positive": {"dtype": "float32", "id": null, "_type": "Value"}, "neutral": {"dtype": "float32", "id": null, "_type": "Value"}}, "text_id": {"dtype": "string", "id": null, "_type": "Value"}, "review_id": {"dtype": "string", "id": null, "_type": "Value"}, "review_rating": {"dtype": "int32", "id": null, "_type": "Value"}, "label_distribution": {"positive": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "negative": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "neutral": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "mixed": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}, "metadata": {"split": {"dtype": "string", "id": null, "_type": "Value"}, "round": {"dtype": "int32", "id": null, "_type": "Value"}, "subset": {"dtype": "string", "id": null, "_type": "Value"}, "model_in_the_loop": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "builder_name": "dynabench_dyna_sent", "config_name": "dynabench.dynasent.r1.all", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23007540, "num_examples": 80488, "dataset_name": "dynabench_dyna_sent"}, "validation": {"name": "validation", "num_bytes": 1057327, "num_examples": 3600, "dataset_name": "dynabench_dyna_sent"}, "test": {"name": "test", "num_bytes": 1035527, "num_examples": 3600, "dataset_name": "dynabench_dyna_sent"}}, "download_checksums": {"https://github.com/cgpotts/dynasent/raw/main/dynasent-v1.1.zip": {"num_bytes": 17051772, "checksum": "33001cf394618aa38f9530c43ca87072b92f5ee609a02afa2d168d25560cedfd"}}, "download_size": 17051772, "post_processing_size": null, "dataset_size": 25100394, "size_in_bytes": 42152166}}
dynasent.py CHANGED
@@ -1,5 +1,5 @@
  # coding=utf-8
- # Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
+ # Copyright 2021 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
  #
  # Licensed under the Apache License, Version 2.0 (the "License");
  # you may not use this file except in compliance with the License.
@@ -13,21 +13,29 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  # Lint as: python3
- """Dynabench.QA"""
+
+
+ """Dynabench.DynaSent"""
  from __future__ import absolute_import, division, print_function
  import json
  import os
  from collections import OrderedDict
  import datasets
  logger = datasets.logging.get_logger(__name__)
- _VERSION = datasets.Version("1.0.0")
+ _VERSION = datasets.Version("1.1.0") # v1.1 fixed for example uid.
  _NUM_ROUNDS = 1
  _DESCRIPTION = """\
- Dynabench.QA is a Reading Comprehension dataset collected using a human-and-model-in-the-loop.
+ Dynabench.DynaSent is a Sentiment Analysis dataset collected using a
+ human-and-model-in-the-loop.
  """.strip()
+
+
  class DynabenchRoundDetails:
- """Round details for DynabenchQA datasets."""
- def __init__(self, citation, description, homepage, data_license, data_url, data_features, data_subset_map=None):
+ """Round details for Dynabench.DynaSent datasets."""
+ def __init__(
+ self, citation, description, homepage, data_license, data_url,
+ data_features, data_subset_map=None
+ ):
  self.citation = citation
  self.description = description
  self.homepage = homepage
@@ -39,73 +47,82 @@ class DynabenchRoundDetails:
  _ROUND_DETAILS = {
  1: DynabenchRoundDetails(
  citation="""\
- @article{bartolo2020beat,
- author = {Bartolo, Max and Roberts, Alastair and Welbl, Johannes and Riedel, Sebastian and Stenetorp, Pontus},
- title = {Beat the AI: Investigating Adversarial Human Annotation for Reading Comprehension},
- journal = {Transactions of the Association for Computational Linguistics},
- volume = {8},
- number = {},
- pages = {662-678},
- year = {2020},
- doi = {10.1162/tacl_a_00338},
- URL = { https://doi.org/10.1162/tacl_a_00338 },
- eprint = { https://doi.org/10.1162/tacl_a_00338 },
- abstract = { Innovations in annotation methodology have been a catalyst for Reading Comprehension (RC) datasets and models. One recent trend to challenge current RC models is to involve a model in the annotation process: Humans create questions adversarially, such that the model fails to answer them correctly. In this work we investigate this annotation methodology and apply it in three different settings, collecting a total of 36,000 samples with progressively stronger models in the annotation loop. This allows us to explore questions such as the reproducibility of the adversarial effect, transfer from data collected with varying model-in-the-loop strengths, and generalization to data collected without a model. We find that training on adversarially collected samples leads to strong generalization to non-adversarially collected datasets, yet with progressive performance deterioration with increasingly stronger models-in-the-loop. Furthermore, we find that stronger models can still learn from datasets collected with substantially weaker models-in-the-loop. When trained on data collected with a BiDAF model in the loop, RoBERTa achieves 39.9F1 on questions that it cannot answer when trained on SQuAD—only marginally lower than when trained on data collected using RoBERTa itself (41.0F1). }
+ @article{
+ potts-etal-2020-dynasent,
+ title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},
+ author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus
+ and Kiela, Douwe},
+ journal={arXiv preprint arXiv:2012.15349},
+ url={https://arxiv.org/abs/2012.15349},
+ year={2020}
  }
  """.strip(),
  description="""\
- Dynabench.QA is a Reading Comprehension dataset collected using a human-and-model-in-the-loop. Round 1 is the AdversarialQA dataset from Bartolo et al., 2020, and consists of questions posed by crowdworkers on a set of Wikipedia articles using an adversarial model-in-the-loop.
- We use three different models; BiDAF (Seo et al., 2016), BERT-Large (Devlin et al., 2018), and RoBERTa-Large (Liu et al., 2019) in the annotation loop and construct three datasets; D(BiDAF), D(BERT), and D(RoBERTa), each with 10,000 training examples, 1,000 validation, and 1,000 test examples.
- The adversarial human annotation paradigm ensures that these datasets consist of questions that current state-of-the-art models (at least the ones used as adversaries in the annotation loop) find challenging.
- For more details on the dataset construction process, see https://adversarialqa.github.io.
+ DynaSent is an English-language benchmark task for ternary
+ (positive/negative/neutral) sentiment analysis.
+ For more details on the dataset construction process,
+ see https://github.com/cgpotts/dynasent.
  """.strip(),
- homepage="https://dynabench.org/tasks/2",
- data_license="CC BY-SA 3.0",
- data_url="https://adversarialqa.github.io/data/aqa_v1.0.zip",
+ homepage="https://dynabench.org/tasks/3",
+ data_license="CC BY 4.0",
+ data_url="https://github.com/cgpotts/dynasent/raw/main/dynasent-v1.1.zip",
  data_features=datasets.Features(
  {
  "id": datasets.Value("string"),
- "title": datasets.Value("string"),
- "context": datasets.Value("string"),
- "question": datasets.Value("string"),
- "answers": datasets.features.Sequence(
- {
- "text": datasets.Value("string"),
- "answer_start": datasets.Value("int32"),
- }
+ "hit_ids": datasets.features.Sequence(
+ datasets.Value("string")
  ),
+ "sentence": datasets.Value("string"),
+ "indices_into_review_text": datasets.features.Sequence(
+ datasets.Value("int32")
+ ),
+ "model_0_label": datasets.Value("string"),
+ "model_0_probs": {
+ "negative": datasets.Value("float32"),
+ "positive": datasets.Value("float32"),
+ "neutral": datasets.Value("float32")
+ },
+ "text_id": datasets.Value("string"),
+ "review_id": datasets.Value("string"),
+ "review_rating": datasets.Value("int32"),
+ "label_distribution": {
+ "positive": datasets.features.Sequence(
+ datasets.Value("string")
+ ),
+ "negative": datasets.features.Sequence(
+ datasets.Value("string")
+ ),
+ "neutral": datasets.features.Sequence(
+ datasets.Value("string")
+ ),
+ "mixed": datasets.features.Sequence(
+ datasets.Value("string")
+ )
+ },
+ "gold_label": datasets.Value("string"),
  "metadata": {
  "split": datasets.Value("string"),
  "round": datasets.Value("int32"),
  "subset": datasets.Value("string"),
  "model_in_the_loop": datasets.Value("string"),
- },
+ }
  }
  ),
  data_subset_map=OrderedDict({
  "all": {
- "dir": "combined",
- "model": "Combined",
- },
- "dbidaf": {
- "dir": "1_dbidaf",
- "model": "BiDAF",
- },
- "dbert": {
- "dir": "2_dbert",
- "model": "BERT-Large",
- },
- "droberta": {
- "dir": "3_droberta",
- "model": "RoBERTa-Large",
- },
+ "dir": "dynasent-v1.1",
+ "file_prefix": "dynasent-v1.1-round01-yelp-",
+ "model": "RoBERTa"
+ }
  }),
  )
  }
- class DynabenchQAConfig(datasets.BuilderConfig):
- """BuilderConfig for DynabenchQA datasets."""
+
+
+ class DynabenchDynaSentConfig(datasets.BuilderConfig):
+ """BuilderConfig for Dynabench.DynaSent datasets."""
  def __init__(self, round, subset='all', **kwargs):
- """BuilderConfig for Wikipedia.
+ """BuilderConfig for Dynabench.DynaSent.
  Args:
  round: integer, the dynabench round to load.
  subset: string, the subset of that round's data to load or 'all'.
@@ -114,24 +131,27 @@ class DynabenchQAConfig(datasets.BuilderConfig):
  assert isinstance(round, int), "round ({}) must be set and of type integer".format(round)
  assert 0 < round <= _NUM_ROUNDS, \
  "round (received {}) must be between 1 and {}".format(round, _NUM_ROUNDS)
- super(DynabenchQAConfig, self).__init__(
- name="dynabench.qa.r{}.{}".format(round, subset),
- description="Dynabench QA dataset for round {}, showing dataset selection: {}.".format(round, subset),
+ super(DynabenchDynaSentConfig, self).__init__(
+ name="dynabench.dynasent.r{}.{}".format(round, subset),
+ description="Dynabench DynaSent dataset for round {}, showing dataset selection: {}.".format(round, subset),
  **kwargs,
  )
  self.round = round
  self.subset = subset
- class DynabenchQA(datasets.GeneratorBasedBuilder):
- """Dynabench.QA"""
- BUILDER_CONFIG_CLASS = DynabenchQAConfig
+
+
+ class DynabenchDynaSent(datasets.GeneratorBasedBuilder):
+ """Dynabench.DynaSent"""
+ BUILDER_CONFIG_CLASS = DynabenchDynaSentConfig
  BUILDER_CONFIGS = [
- DynabenchQAConfig(
+ DynabenchDynaSentConfig(
  version=_VERSION,
  round=round,
  subset=subset,
  ) # pylint:disable=g-complex-comprehension
  for round in range(1, _NUM_ROUNDS+1) for subset in _ROUND_DETAILS[round].data_subset_map
  ]
+
  def _info(self):
  round_details = _ROUND_DETAILS[self.config.round]
  return datasets.DatasetInfo(
@@ -139,12 +159,18 @@ class DynabenchQA(datasets.GeneratorBasedBuilder):
  features=round_details.data_features,
  homepage=round_details.homepage,
  citation=round_details.citation,
- supervised_keys=None, # No default supervised_keys (as we have to pass both question and context as input).
+ supervised_keys=None
  )
+
  @staticmethod
  def _get_filepath(dl_dir, round, subset, split):
  round_details = _ROUND_DETAILS[round]
- return os.path.join(dl_dir, round_details.data_subset_map[subset]["dir"], split + ".json")
+ return os.path.join(
+ dl_dir,
+ round_details.data_subset_map[subset]["dir"],
+ round_details.data_subset_map[subset]["file_prefix"] + split + ".jsonl"
+ )
+
  def _split_generators(self, dl_manager):
  round_details = _ROUND_DETAILS[self.config.round]
  dl_dir = dl_manager.download_and_extract(round_details.data_url)
@@ -152,7 +178,9 @@ class DynabenchQA(datasets.GeneratorBasedBuilder):
  datasets.SplitGenerator(
  name=datasets.Split.TRAIN,
  gen_kwargs={
- "filepath": self._get_filepath(dl_dir, self.config.round, self.config.subset, "train"),
+ "filepath": self._get_filepath(
+ dl_dir, self.config.round, self.config.subset, "train"
+ ),
  "split": "train",
  "round": self.config.round,
  "subset": self.config.subset,
@@ -162,7 +190,9 @@ class DynabenchQA(datasets.GeneratorBasedBuilder):
  datasets.SplitGenerator(
  name=datasets.Split.VALIDATION,
  gen_kwargs={
- "filepath": self._get_filepath(dl_dir, self.config.round, self.config.subset, "dev"),
+ "filepath": self._get_filepath(
+ dl_dir, self.config.round, self.config.subset, "dev"
+ ),
  "split": "validation",
  "round": self.config.round,
  "subset": self.config.subset,
@@ -172,7 +202,9 @@ class DynabenchQA(datasets.GeneratorBasedBuilder):
  datasets.SplitGenerator(
  name=datasets.Split.TEST,
  gen_kwargs={
- "filepath": self._get_filepath(dl_dir, self.config.round, self.config.subset, "test"),
+ "filepath": self._get_filepath(
+ dl_dir, self.config.round, self.config.subset, "test"
+ ),
  "split": "test",
  "round": self.config.round,
  "subset": self.config.subset,
@@ -180,35 +212,34 @@ class DynabenchQA(datasets.GeneratorBasedBuilder):
  },
  ),
  ]
+
  def _generate_examples(self, filepath, split, round, subset, model_in_the_loop):
  """This function returns the examples in the raw (text) form."""
+ ternary_labels = ('positive', 'negative', 'neutral') # Enforce the ternary label set for now.
  logger.info("generating examples from = %s", filepath)
  with open(filepath, encoding="utf-8") as f:
- squad = json.load(f)
- for article in squad["data"]:
- title = article.get("title", "").strip()
- for paragraph in article["paragraphs"]:
- context = paragraph["context"].strip()
- for qa in paragraph["qas"]:
- question = qa["question"].strip()
- id_ = qa["id"]
- answer_starts = [answer["answer_start"] for answer in qa["answers"]]
- answers = [answer["text"].strip() for answer in qa["answers"]]
- # Features currently used are "context", "question", and "answers".
- # Others are extracted here for the ease of future expansions.
- yield id_, {
- "title": title,
- "context": context,
- "question": question,
- "id": id_,
- "answers": {
- "answer_start": answer_starts,
- "text": answers,
- },
- "metadata": {
- "split": split,
- "round": round,
- "subset": subset,
- "model_in_the_loop": model_in_the_loop
- },
- }
+ for line in f:
+ d = json.loads(line)
+ if d['gold_label'] in ternary_labels:
+ # Construct DynaSent features.
+ yield d["text_id"], {
+ "id": d["text_id"],
+ # DynaSent Example.
+ "hit_ids": d["hit_ids"],
+ "sentence": d["sentence"],
+ "indices_into_review_text": d["indices_into_review_text"],
+ "model_0_label": d["model_0_label"],
+ "model_0_probs": d["model_0_probs"],
+ "text_id": d["text_id"],
+ "review_id": d["review_id"],
+ "review_rating": d["review_rating"],
+ "label_distribution": d["label_distribution"],
+ "gold_label": d["gold_label"],
+ # Metadata.
+ "metadata": {
+ "split": split,
+ "round": round,
+ "subset": subset,
+ "model_in_the_loop": model_in_the_loop
+ },
+ }
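
The new _generate_examples streams each JSONL file line by line and yields only examples whose gold_label is one of the three ternary labels, keyed by text_id; entries with any other label (for example mixed or no-majority cases) are skipped. A standalone sketch of that same filtering, run directly on an extracted file; the local path is an assumption that mirrors what _get_filepath builds for round 1, subset "all", split "dev":

import json
import os

# Hypothetical local path: extracted dir + the subset's file_prefix + split + ".jsonl",
# as assembled by _get_filepath above.
path = os.path.join("dynasent-v1.1", "dynasent-v1.1-round01-yelp-dev.jsonl")

ternary_labels = ("positive", "negative", "neutral")
kept, skipped = 0, 0
with open(path, encoding="utf-8") as f:
    for line in f:
        d = json.loads(line)
        if d["gold_label"] in ternary_labels:
            kept += 1      # would be yielded by the builder, keyed by d["text_id"]
        else:
            skipped += 1   # dropped, exactly as in _generate_examples
print(kept, skipped)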