frankaging committed
Commit 3bb3188
1 parent: 8910c0a

add round2

Files changed (2)
  1. dataset_infos.json +1 -1
  2. dynasent.py +153 -23
dataset_infos.json CHANGED
@@ -1 +1 @@
- {"dynabench.dynasent.r1.all": {"description": "DynaSent is an English-language benchmark task for ternary\n (positive/negative/neutral) sentiment analysis.\n For more details on the dataset construction process,\n see https://github.com/cgpotts/dynasent.", "citation": "@article{\n potts-etal-2020-dynasent,\n title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},\n author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus\n and Kiela, Douwe},\n journal={arXiv preprint arXiv:2012.15349},\n url={https://arxiv.org/abs/2012.15349},\n year={2020}\n }", "homepage": "https://dynabench.org/tasks/3", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "hit_ids": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "indices_into_review_text": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "model_0_label": {"dtype": "string", "id": null, "_type": "Value"}, "model_0_probs": {"negative": {"dtype": "float32", "id": null, "_type": "Value"}, "positive": {"dtype": "float32", "id": null, "_type": "Value"}, "neutral": {"dtype": "float32", "id": null, "_type": "Value"}}, "text_id": {"dtype": "string", "id": null, "_type": "Value"}, "review_id": {"dtype": "string", "id": null, "_type": "Value"}, "review_rating": {"dtype": "int32", "id": null, "_type": "Value"}, "label_distribution": {"positive": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "negative": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "neutral": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "mixed": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}, "metadata": {"split": {"dtype": "string", "id": null, "_type": "Value"}, "round": {"dtype": "int32", "id": null, "_type": "Value"}, "subset": {"dtype": "string", "id": null, "_type": "Value"}, "model_in_the_loop": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "builder_name": "dynabench_dyna_sent", "config_name": "dynabench.dynasent.r1.all", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23007540, "num_examples": 80488, "dataset_name": "dynabench_dyna_sent"}, "validation": {"name": "validation", "num_bytes": 1057327, "num_examples": 3600, "dataset_name": "dynabench_dyna_sent"}, "test": {"name": "test", "num_bytes": 1035527, "num_examples": 3600, "dataset_name": "dynabench_dyna_sent"}}, "download_checksums": {"https://github.com/cgpotts/dynasent/raw/main/dynasent-v1.1.zip": {"num_bytes": 17051772, "checksum": "33001cf394618aa38f9530c43ca87072b92f5ee609a02afa2d168d25560cedfd"}}, "download_size": 17051772, "post_processing_size": null, "dataset_size": 25100394, "size_in_bytes": 42152166}}
+ {"dynabench.dynasent.r1.all": {"description": "DynaSent is an English-language benchmark task for ternary\n (positive/negative/neutral) sentiment analysis.\n For more details on the dataset construction process,\n see https://github.com/cgpotts/dynasent.", "citation": "@article{\n potts-etal-2020-dynasent,\n title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},\n author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus\n and Kiela, Douwe},\n journal={arXiv preprint arXiv:2012.15349},\n url={https://arxiv.org/abs/2012.15349},\n year={2020}\n }", "homepage": "https://dynabench.org/tasks/3", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "hit_ids": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "indices_into_review_text": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "model_0_label": {"dtype": "string", "id": null, "_type": "Value"}, "model_0_probs": {"negative": {"dtype": "float32", "id": null, "_type": "Value"}, "positive": {"dtype": "float32", "id": null, "_type": "Value"}, "neutral": {"dtype": "float32", "id": null, "_type": "Value"}}, "text_id": {"dtype": "string", "id": null, "_type": "Value"}, "review_id": {"dtype": "string", "id": null, "_type": "Value"}, "review_rating": {"dtype": "int32", "id": null, "_type": "Value"}, "label_distribution": {"positive": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "negative": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "neutral": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "mixed": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}, "metadata": {"split": {"dtype": "string", "id": null, "_type": "Value"}, "round": {"dtype": "int32", "id": null, "_type": "Value"}, "subset": {"dtype": "string", "id": null, "_type": "Value"}, "model_in_the_loop": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "builder_name": "dynabench_dyna_sent", "config_name": "dynabench.dynasent.r1.all", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 23007540, "num_examples": 80488, "dataset_name": "dynabench_dyna_sent"}, "validation": {"name": "validation", "num_bytes": 1057327, "num_examples": 3600, "dataset_name": "dynabench_dyna_sent"}, "test": {"name": "test", "num_bytes": 1035527, "num_examples": 3600, "dataset_name": "dynabench_dyna_sent"}}, "download_checksums": {"https://github.com/cgpotts/dynasent/raw/main/dynasent-v1.1.zip": {"num_bytes": 17051772, "checksum": "33001cf394618aa38f9530c43ca87072b92f5ee609a02afa2d168d25560cedfd"}}, "download_size": 17051772, "post_processing_size": null, "dataset_size": 25100394, "size_in_bytes": 42152166}, "dynabench.dynasent.r2.all": {"description": "DynaSent is an English-language benchmark task for ternary\n (positive/negative/neutral) sentiment analysis.\n For more details on the dataset construction process,\n see https://github.com/cgpotts/dynasent.", "citation": "@article{\n potts-etal-2020-dynasent,\n title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},\n author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus\n and Kiela, Douwe},\n journal={arXiv preprint arXiv:2012.15349},\n url={https://arxiv.org/abs/2012.15349},\n year={2020}\n }", "homepage": "https://dynabench.org/tasks/3", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "hit_ids": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "sentence": {"dtype": "string", "id": null, "_type": "Value"}, "sentence_author": {"dtype": "string", "id": null, "_type": "Value"}, "has_prompt": {"dtype": "bool", "id": null, "_type": "Value"}, "prompt_data": {"indices_into_review_text": {"feature": {"dtype": "int32", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "review_rating": {"dtype": "int32", "id": null, "_type": "Value"}, "prompt_sentence": {"dtype": "string", "id": null, "_type": "Value"}, "review_id": {"dtype": "string", "id": null, "_type": "Value"}}, "model_1_label": {"dtype": "string", "id": null, "_type": "Value"}, "model_1_probs": {"negative": {"dtype": "float32", "id": null, "_type": "Value"}, "positive": {"dtype": "float32", "id": null, "_type": "Value"}, "neutral": {"dtype": "float32", "id": null, "_type": "Value"}}, "text_id": {"dtype": "string", "id": null, "_type": "Value"}, "label_distribution": {"positive": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "negative": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "neutral": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}, "mixed": {"feature": {"dtype": "string", "id": null, "_type": "Value"}, "length": -1, "id": null, "_type": "Sequence"}}, "gold_label": {"dtype": "string", "id": null, "_type": "Value"}, "metadata": {"split": {"dtype": "string", "id": null, "_type": "Value"}, "round": {"dtype": "int32", "id": null, "_type": "Value"}, "subset": {"dtype": "string", "id": null, "_type": "Value"}, "model_in_the_loop": {"dtype": "string", "id": null, "_type": "Value"}}}, "post_processed": null, "supervised_keys": null, "builder_name": "dynabench_dyna_sent", "config_name": "dynabench.dynasent.r2.all", "version": {"version_str": "1.1.0", "description": null, "major": 1, "minor": 1, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 4604051, "num_examples": 13065, "dataset_name": "dynabench_dyna_sent"}, "validation": {"name": "validation", "num_bytes": 264059, "num_examples": 720, "dataset_name": "dynabench_dyna_sent"}, "test": {"name": "test", "num_bytes": 259782, "num_examples": 720, "dataset_name": "dynabench_dyna_sent"}}, "download_checksums": {"https://github.com/cgpotts/dynasent/raw/main/dynasent-v1.1.zip": {"num_bytes": 17051772, "checksum": "33001cf394618aa38f9530c43ca87072b92f5ee609a02afa2d168d25560cedfd"}}, "download_size": 17051772, "post_processing_size": null, "dataset_size": 5127892, "size_in_bytes": 22179664}}
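The updated dataset_infos.json registers a second config, dynabench.dynasent.r2.all, alongside the original round-1 config. As a quick sanity check of the new metadata, a small script like the one below could print the split sizes recorded for the round-2 config; it only assumes the file is read from the repository root (the filename dataset_infos.json is taken from the diff above), and the expected numbers come from that metadata.

import json

# Hedged sanity check: read the committed metadata file and print the
# split sizes recorded for the newly added round-2 config.
with open("dataset_infos.json", encoding="utf-8") as f:
    infos = json.load(f)

r2_info = infos["dynabench.dynasent.r2.all"]
for split_name, split_info in r2_info["splits"].items():
    print(split_name, split_info["num_examples"])
# Expected from the metadata above: train 13065, validation 720, test 720.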
dynasent.py CHANGED
@@ -23,7 +23,7 @@ from collections import OrderedDict
  import datasets
  logger = datasets.logging.get_logger(__name__)
  _VERSION = datasets.Version("1.1.0") # v1.1 fixed for example uid.
- _NUM_ROUNDS = 1
+ _NUM_ROUNDS = 2
  _DESCRIPTION = """\
  Dynabench.DynaSent is a Sentiment Analysis dataset collected using a
  human-and-model-in-the-loop.
@@ -115,6 +115,82 @@ _ROUND_DETAILS = {
                  "model": "RoBERTa"
              }
          }),
+     ),
+     2: DynabenchRoundDetails(
+         citation="""\
+         @article{
+         potts-etal-2020-dynasent,
+         title={{DynaSent}: A Dynamic Benchmark for Sentiment Analysis},
+         author={Potts, Christopher and Wu, Zhengxuan and Geiger, Atticus
+         and Kiela, Douwe},
+         journal={arXiv preprint arXiv:2012.15349},
+         url={https://arxiv.org/abs/2012.15349},
+         year={2020}
+         }
+         """.strip(),
+         description="""\
+         DynaSent is an English-language benchmark task for ternary
+         (positive/negative/neutral) sentiment analysis.
+         For more details on the dataset construction process,
+         see https://github.com/cgpotts/dynasent.
+         """.strip(),
+         homepage="https://dynabench.org/tasks/3",
+         data_license="CC BY 4.0",
+         data_url="https://github.com/cgpotts/dynasent/raw/main/dynasent-v1.1.zip",
+         data_features=datasets.Features(
+             {
+                 "id": datasets.Value("string"),
+                 "hit_ids": datasets.features.Sequence(
+                     datasets.Value("string")
+                 ),
+                 "sentence": datasets.Value("string"),
+                 "sentence_author": datasets.Value("string"),
+                 "has_prompt": datasets.Value("bool"),
+                 "prompt_data": {
+                     "indices_into_review_text": datasets.features.Sequence(
+                         datasets.Value("int32")
+                     ),
+                     "review_rating": datasets.Value("int32"),
+                     "prompt_sentence": datasets.Value("string"),
+                     "review_id": datasets.Value("string")
+                 },
+                 "model_1_label": datasets.Value("string"),
+                 "model_1_probs": {
+                     "negative": datasets.Value("float32"),
+                     "positive": datasets.Value("float32"),
+                     "neutral": datasets.Value("float32")
+                 },
+                 "text_id": datasets.Value("string"),
+                 "label_distribution": {
+                     "positive": datasets.features.Sequence(
+                         datasets.Value("string")
+                     ),
+                     "negative": datasets.features.Sequence(
+                         datasets.Value("string")
+                     ),
+                     "neutral": datasets.features.Sequence(
+                         datasets.Value("string")
+                     ),
+                     "mixed": datasets.features.Sequence(
+                         datasets.Value("string")
+                     )
+                 },
+                 "gold_label": datasets.Value("string"),
+                 "metadata": {
+                     "split": datasets.Value("string"),
+                     "round": datasets.Value("int32"),
+                     "subset": datasets.Value("string"),
+                     "model_in_the_loop": datasets.Value("string"),
+                 }
+             }
+         ),
+         data_subset_map=OrderedDict({
+             "all": {
+                 "dir": "dynasent-v1.1",
+                 "file_prefix": "dynasent-v1.1-round02-dynabench-",
+                 "model": "RoBERTa"
+             }
+         }),
      )
  }
 
@@ -221,25 +297,79 @@ class DynabenchDynaSent(datasets.GeneratorBasedBuilder):
              for line in f:
                  d = json.loads(line)
                  if d['gold_label'] in ternary_labels:
-                     # Construct DynaSent features.
-                     yield d["text_id"], {
-                         "id": d["text_id"],
-                         # DynaSent Example.
-                         "hit_ids": d["hit_ids"],
-                         "sentence": d["sentence"],
-                         "indices_into_review_text": d["indices_into_review_text"],
-                         "model_0_label": d["model_0_label"],
-                         "model_0_probs": d["model_0_probs"],
-                         "text_id": d["text_id"],
-                         "review_id": d["review_id"],
-                         "review_rating": d["review_rating"],
-                         "label_distribution": d["label_distribution"],
-                         "gold_label": d["gold_label"],
-                         # Metadata.
-                         "metadata": {
-                             "split": split,
-                             "round": round,
-                             "subset": subset,
-                             "model_in_the_loop": model_in_the_loop
-                         },
-                     }
+                     if round == 1:
+                         # Construct DynaSent features.
+                         yield d["text_id"], {
+                             "id": d["text_id"],
+                             # DynaSent Example.
+                             "hit_ids": d["hit_ids"],
+                             "sentence": d["sentence"],
+                             "indices_into_review_text": d["indices_into_review_text"],
+                             "model_0_label": d["model_0_label"],
+                             "model_0_probs": d["model_0_probs"],
+                             "text_id": d["text_id"],
+                             "review_id": d["review_id"],
+                             "review_rating": d["review_rating"],
+                             "label_distribution": d["label_distribution"],
+                             "gold_label": d["gold_label"],
+                             # Metadata.
+                             "metadata": {
+                                 "split": split,
+                                 "round": round,
+                                 "subset": subset,
+                                 "model_in_the_loop": model_in_the_loop
+                             }
+                         }
+                     elif round == 2:
+                         # Construct DynaSent features.
+                         if d["has_prompt"]:
+                             if "indices_into_review_text" in d["prompt_data"]:
+                                 indices_into_review_text = d["prompt_data"]["indices_into_review_text"]
+                             else:
+                                 indices_into_review_text = []
+                             if "review_rating" in d["prompt_data"]:
+                                 review_rating = d["prompt_data"]["review_rating"]
+                             else:
+                                 review_rating = -1 # -1 means unknown.
+                             if "review_id" in d["prompt_data"]:
+                                 review_id = d["prompt_data"]["review_id"]
+                             else:
+                                 review_id = ""
+                             if "prompt_sentence" in d["prompt_data"]:
+                                 prompt_sentence = d["prompt_data"]["prompt_sentence"]
+                             else:
+                                 prompt_sentence = ""
+                             prompt_data = {
+                                 "indices_into_review_text": indices_into_review_text,
+                                 "review_rating": review_rating,
+                                 "prompt_sentence": prompt_sentence,
+                                 "review_id": review_id,
+                             }
+                         else:
+                             prompt_data = {
+                                 "indices_into_review_text": [],
+                                 "review_rating": -1, # -1 means unknown.
+                                 "prompt_sentence": "",
+                                 "review_id": "",
+                             }
+                         yield d["text_id"], {
+                             "id": d["text_id"],
+                             # DynaSent Example.
+                             "hit_ids": d["hit_ids"],
+                             "sentence": d["sentence"],
+                             "sentence_author": d["sentence_author"],
+                             "has_prompt": d["has_prompt"],
+                             "prompt_data": prompt_data,
+                             "model_1_label": d["model_1_label"],
+                             "model_1_probs": d["model_1_probs"],
+                             "text_id": d["text_id"],
+                             "label_distribution": d["label_distribution"],
+                             "gold_label": d["gold_label"],
+                             # Metadata.
+                             "metadata": {
+                                 "split": split,
+                                 "round": round,
+                                 "subset": subset,
+                                 "model_in_the_loop": model_in_the_loop
+                             }
+                         }
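With _NUM_ROUNDS bumped to 2 and the round-2 entry added to _ROUND_DETAILS, the loading script exposes a second config alongside round 1. A minimal usage sketch, assuming the script is loaded locally from this repository (the Hub repo id is not shown in this diff, so the local-script form is used; the config name comes from dataset_infos.json above):

import datasets

# Hedged sketch: load the newly added round-2 config directly from the
# local loading script committed in this change.
dynasent_r2 = datasets.load_dataset("dynasent.py", "dynabench.dynasent.r2.all")

example = dynasent_r2["train"][0]
# Round-2 examples carry the fields added in this commit, e.g. sentence_author,
# has_prompt, prompt_data, and the model_1_* predictions.
print(example["sentence"], example["gold_label"])
print(example["has_prompt"], example["prompt_data"]["prompt_sentence"])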