jmhessel committed
Commit 801f63d
1 Parent(s): 64a5a89

Upload newyorker_caption_contest.py

Files changed (1): newyorker_caption_contest.py (added, +402 lines)

# coding=utf-8
# Lint as: python3
"""The Caption Contest benchmark."""


import json
import os

import datasets

_CAPTION_CONTEST_TASKS_CITATION = """\
@article{hessel2022androids,
  title={Do Androids Laugh at Electric Sheep? Humor "Understanding" Benchmarks from The New Yorker Caption Contest},
  author={Hessel, Jack and Marasovi{\\'c}, Ana and Hwang, Jena D and Lee, Lillian and Da, Jeff and Zellers, Rowan and Mankoff, Robert and Choi, Yejin},
  journal={arXiv preprint arXiv:2209.06293},
  year={2022}
}

www.capcon.dev

Our data contributions are:

- the cartoon-level annotations;
- the joke explanations;
- and the framing of the tasks.

We release the data we contribute under CC-BY (see DATASET_LICENSE).

If you find this data useful in your work, in addition to citing our contributions, please also cite the following, from which the cartoons/captions in our corpus are derived:

@misc{newyorkernextmldataset,
  author={Jain, Lalit and Jamieson, Kevin and Mankoff, Robert and Nowak, Robert and Sievert, Scott},
  title={The {N}ew {Y}orker Cartoon Caption Contest Dataset},
  year={2020},
  url={https://nextml.github.io/caption-contest-data/}
}

@inproceedings{radev-etal-2016-humor,
  title = "Humor in Collective Discourse: Unsupervised Funniness Detection in The {New Yorker} Cartoon Caption Contest",
  author = "Radev, Dragomir and Stent, Amanda and Tetreault, Joel and Pappu, Aasish and Iliakopoulou, Aikaterini and Chanfreau, Agustin and de Juan, Paloma and Vallmitjana, Jordi and Jaimes, Alejandro and Jha, Rahul and Mankoff, Robert",
  booktitle = "LREC",
  year = "2016",
}

@inproceedings{shahaf2015inside,
  title={Inside jokes: Identifying humorous cartoon captions},
  author={Shahaf, Dafna and Horvitz, Eric and Mankoff, Robert},
  booktitle={KDD},
  year={2015},
}
"""

_CAPTION_CONTEST_DESCRIPTION = """\
There are 3 caption contest tasks, described in the paper. In the Matching multiple choice task,
models must recognize a caption written about a cartoon (vs. options that were not). In the
Quality Ranking task, models must evaluate the quality of a caption by scoring it more highly
than a lower-quality option from the same contest. In the Explanation Generation task, models
must explain why the joke is funny.
"""

_MATCHING_DESCRIPTION = """\
You are given a cartoon and 5 captions. Only one of the captions was truly written about the cartoon. You must select it.
"""

_RANKING_DESCRIPTION = """\
You are given a cartoon and 2 captions. One of the captions was selected by crowd voting or New Yorker editors as high quality. You must select it.
"""

_EXPLANATION_DESCRIPTION = """\
You are given a cartoon and a caption that was written about it. You must autoregressively generate a joke explanation.
"""


_IMAGES_URL = "https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/all_contest_images.zip"

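# How the three task framings above surface in the configs below (a sketch, not stated
# explicitly in this file): matching and ranking are multiple choice over `caption_choices`
# with a letter `label` drawn from `label_classes`; for explanation, `label_classes` is None
# and `label` presumably holds the reference joke explanation as free text.
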

def _get_configs_crossvals():
    """Builds the cross-validation variants (splits 1-4) of each task configuration."""
    cross_val_configs = []
    for split_idx in [1, 2, 3, 4]:
        cur_split_configs = [
            CaptionContestConfig(
                name='matching_{}'.format(split_idx),
                description=_MATCHING_DESCRIPTION,
                features=[
                    'image',
                    'contest_number',
                    'image_location',
                    'image_description',
                    'image_uncanny_description',
                    'entities',
                    'questions',
                    'caption_choices',
                    'from_description',
                ],
                label_classes=["A", "B", "C", "D", "E"],
                data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/matching_{}.zip'.format(split_idx),
                url='www.capcon.dev',
                citation=_CAPTION_CONTEST_TASKS_CITATION,
            ),
            CaptionContestConfig(
                name='matching_from_pixels_{}'.format(split_idx),
                description=_MATCHING_DESCRIPTION,
                features=[
                    'image',
                    'contest_number',
                    'caption_choices',
                ],
                label_classes=["A", "B", "C", "D", "E"],
                data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/matching_from_pixels_{}.zip'.format(split_idx),
                url='www.capcon.dev',
                citation=_CAPTION_CONTEST_TASKS_CITATION,
            ),
            CaptionContestConfig(
                name='ranking_{}'.format(split_idx),
                description=_RANKING_DESCRIPTION,
                features=[
                    'image',
                    'contest_number',
                    'image_location',
                    'image_description',
                    'image_uncanny_description',
                    'entities',
                    'questions',
                    'caption_choices',
                    'from_description',
                    'winner_source',
                ],
                label_classes=["A", "B"],
                data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/ranking_{}.zip'.format(split_idx),
                url='www.capcon.dev',
                citation=_CAPTION_CONTEST_TASKS_CITATION,
            ),
            CaptionContestConfig(
                name='ranking_from_pixels_{}'.format(split_idx),
                description=_RANKING_DESCRIPTION,
                features=[
                    'image',
                    'contest_number',
                    'caption_choices',
                    'winner_source',
                ],
                label_classes=["A", "B"],
                data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/ranking_from_pixels_{}.zip'.format(split_idx),
                url='www.capcon.dev',
                citation=_CAPTION_CONTEST_TASKS_CITATION,
            ),
            CaptionContestConfig(
                name='explanation_{}'.format(split_idx),
                description=_EXPLANATION_DESCRIPTION,
                features=[
                    'image',
                    'contest_number',
                    'image_location',
                    'image_description',
                    'image_uncanny_description',
                    'entities',
                    'questions',
                    'caption_choices',
                    'from_description',
                ],
                label_classes=None,
                data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/explanation_{}.zip'.format(split_idx),
                url='www.capcon.dev',
                citation=_CAPTION_CONTEST_TASKS_CITATION,
            ),
            CaptionContestConfig(
                name='explanation_from_pixels_{}'.format(split_idx),
                description=_EXPLANATION_DESCRIPTION,
                features=[
                    'image',
                    'contest_number',
                    'caption_choices',
                ],
                label_classes=None,
                data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/explanation_from_pixels_{}.zip'.format(split_idx),
                url='www.capcon.dev',
                citation=_CAPTION_CONTEST_TASKS_CITATION,
            ),
        ]
        cross_val_configs.extend(cur_split_configs)
    return cross_val_configs

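# Note: the loop above yields 24 configs in total (6 task variants x 4 cross-validation
# splits), with names such as "matching_1", "ranking_from_pixels_2", and "explanation_4":
#
#   >>> sorted(c.name for c in _get_configs_crossvals())[:3]
#   ['explanation_1', 'explanation_2', 'explanation_3']
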

class CaptionContestConfig(datasets.BuilderConfig):
    """BuilderConfig for Caption Contest."""

    def __init__(self, features, data_url, citation, url, label_classes=None, **kwargs):
        """BuilderConfig for Caption Contest.

        Args:
          features: `list[string]`, list of the features that will appear in the
            feature dict. Should not include "label".
          data_url: `string`, url to download the zip file from.
          citation: `string`, citation for the data set.
          url: `string`, url for information about the data set.
          label_classes: `list[string]`, the list of classes for the label if the
            label is present as a string. If not provided, there is no fixed label set.
          **kwargs: keyword arguments forwarded to super.
        """
        super(CaptionContestConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.features = features
        self.data_url = data_url
        self.citation = citation
        self.url = url
        self.label_classes = label_classes


class CaptionContest(datasets.GeneratorBasedBuilder):
    """The CaptionContest benchmark."""

    BUILDER_CONFIGS = [
        CaptionContestConfig(
            name='matching',
            description=_MATCHING_DESCRIPTION,
            features=[
                'image',
                'contest_number',
                'image_location',
                'image_description',
                'image_uncanny_description',
                'entities',
                'questions',
                'caption_choices',
                'from_description',
            ],
            label_classes=["A", "B", "C", "D", "E"],
            data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/matching.zip',
            url='www.capcon.dev',
            citation=_CAPTION_CONTEST_TASKS_CITATION,
        ),
        CaptionContestConfig(
            name='matching_from_pixels',
            description=_MATCHING_DESCRIPTION,
            features=[
                'image',
                'contest_number',
                'caption_choices',
            ],
            label_classes=["A", "B", "C", "D", "E"],
            data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/matching_from_pixels.zip',
            url='www.capcon.dev',
            citation=_CAPTION_CONTEST_TASKS_CITATION,
        ),
        CaptionContestConfig(
            name='ranking',
            description=_RANKING_DESCRIPTION,
            features=[
                'image',
                'contest_number',
                'image_location',
                'image_description',
                'image_uncanny_description',
                'entities',
                'questions',
                'caption_choices',
                'from_description',
                'winner_source',
            ],
            label_classes=["A", "B"],
            data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/ranking.zip',
            url='www.capcon.dev',
            citation=_CAPTION_CONTEST_TASKS_CITATION,
        ),
        CaptionContestConfig(
            name='ranking_from_pixels',
            description=_RANKING_DESCRIPTION,
            features=[
                'image',
                'contest_number',
                'caption_choices',
                'winner_source',
            ],
            label_classes=["A", "B"],
            data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/ranking_from_pixels.zip',
            url='www.capcon.dev',
            citation=_CAPTION_CONTEST_TASKS_CITATION,
        ),
        CaptionContestConfig(
            name='explanation',
            description=_EXPLANATION_DESCRIPTION,
            features=[
                'image',
                'contest_number',
                'image_location',
                'image_description',
                'image_uncanny_description',
                'entities',
                'questions',
                'caption_choices',
                'from_description',
            ],
            label_classes=None,
            data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/explanation.zip',
            url='www.capcon.dev',
            citation=_CAPTION_CONTEST_TASKS_CITATION,
        ),
        CaptionContestConfig(
            name='explanation_from_pixels',
            description=_EXPLANATION_DESCRIPTION,
            features=[
                'image',
                'contest_number',
                'caption_choices',
            ],
            label_classes=None,
            data_url='https://storage.googleapis.com/ai2-jack-public/caption_contest_data_public/huggingface_hub/v1.0/explanation_from_pixels.zip',
            url='www.capcon.dev',
            citation=_CAPTION_CONTEST_TASKS_CITATION,
        ),
    ] + _get_configs_crossvals()

    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}
        # Everything is a string except for contest_number, entities, questions, the image,
        # and caption_choices (a list of options for all tasks except explanation).
        features['contest_number'] = datasets.Value("int32")
        if 'explanation' not in self.config.name:
            features['caption_choices'] = datasets.features.Sequence(datasets.Value("string"))

        if 'entities' in features:
            features['entities'] = datasets.features.Sequence(datasets.Value("string"))

        if 'questions' in features:
            features['questions'] = datasets.features.Sequence(datasets.Value("string"))

        if 'image' in features:
            features['image'] = datasets.Image()

        features['label'] = datasets.Value("string")
        features['n_tokens_label'] = datasets.Value("int32")
        features['instance_id'] = datasets.Value("string")

        return datasets.DatasetInfo(
            description=_CAPTION_CONTEST_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=self.config.url,
            citation=self.config.citation,
        )

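    # For the "matching" config, for example, the resulting schema is (a sketch derived from
    # the assignments above):
    #   image: Image(), contest_number: int32,
    #   image_location / image_description / image_uncanny_description / from_description: string,
    #   entities / questions / caption_choices: Sequence(string),
    #   label: string (one of "A".."E"), n_tokens_label: int32, instance_id: string
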
    def _split_generators(self, dl_manager):
        dl_dir = dl_manager.download_and_extract(self.config.data_url) or ""
        self.images_dir = dl_manager.download_and_extract(_IMAGES_URL)
        task_name = _get_task_name_from_data_url(self.config.data_url)
        dl_dir = os.path.join(dl_dir, task_name)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                    "split": datasets.Split.TRAIN,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "val.jsonl"),
                    "split": datasets.Split.VALIDATION,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "test.jsonl"),
                    "split": datasets.Split.TEST,
                },
            ),
        ]

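    # Expected archive layout (derived from the paths above): each task zip extracts to a
    # directory named after the zip (e.g. "matching/") containing train.jsonl, val.jsonl,
    # and test.jsonl; the images zip extracts to "all_contest_images/" with one
    # <contest_number>.jpeg per contest.
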
    def _generate_examples(self, data_file, split):
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                # Attach the contest's cartoon image as raw bytes alongside its path.
                image_path = os.path.join(self.images_dir, "all_contest_images", "{}.jpeg".format(row['contest_number']))
                with open(image_path, "rb") as image:
                    row['image'] = {"path": image_path, "bytes": image.read()}
                yield row['instance_id'], row


def _get_task_name_from_data_url(data_url):
    # e.g. ".../v1.0/matching_from_pixels_2.zip" -> "matching_from_pixels_2"
    return data_url.split("/")[-1].split(".")[0]
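
A quick usage sketch: assuming this script is served from the Hub as jmhessel/newyorker_caption_contest (the repo id is inferred from this commit's author and filename, not stated in the file), any config name defined above can be passed to load_dataset:

import datasets

# "matching" uses the default split; "matching_1" .. "matching_4" select the
# cross-validation variants built by _get_configs_crossvals().
ds = datasets.load_dataset("jmhessel/newyorker_caption_contest", "matching")
example = ds["validation"][0]
print(example["contest_number"], example["label"], example["caption_choices"])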