parquet-converter committed
Commit 7758e1e · 1 Parent(s): c1abc29

Update parquet files

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50):
  1. README.md +0 -857
  2. abandoned/red_caps-train.parquet +3 -0
  3. abandoned_2017/red_caps-train.parquet +3 -0
  4. abandoned_2018/red_caps-train.parquet +3 -0
  5. abandoned_2019/red_caps-train.parquet +3 -0
  6. abandoned_2020/red_caps-train.parquet +3 -0
  7. abandonedporn/red_caps-train.parquet +3 -0
  8. abandonedporn_2017/red_caps-train.parquet +3 -0
  9. abandonedporn_2018/red_caps-train.parquet +3 -0
  10. abandonedporn_2019/red_caps-train.parquet +3 -0
  11. abandonedporn_2020/red_caps-train.parquet +3 -0
  12. absoluteunits/red_caps-train.parquet +3 -0
  13. absoluteunits_2018/red_caps-train.parquet +3 -0
  14. absoluteunits_2019/red_caps-train.parquet +3 -0
  15. absoluteunits_2020/red_caps-train.parquet +3 -0
  16. airplants/red_caps-train.parquet +3 -0
  17. airplants_2017/red_caps-train.parquet +3 -0
  18. airplants_2018/red_caps-train.parquet +3 -0
  19. airplants_2019/red_caps-train.parquet +3 -0
  20. airplants_2020/red_caps-train.parquet +3 -0
  21. all/red_caps-train-00000-of-00007.parquet +3 -0
  22. all/red_caps-train-00001-of-00007.parquet +3 -0
  23. all/red_caps-train-00002-of-00007.parquet +3 -0
  24. all/red_caps-train-00003-of-00007.parquet +3 -0
  25. all/red_caps-train-00004-of-00007.parquet +3 -0
  26. all/red_caps-train-00005-of-00007.parquet +3 -0
  27. all/red_caps-train-00006-of-00007.parquet +3 -0
  28. alltheanimals/red_caps-train.parquet +3 -0
  29. alltheanimals_2019/red_caps-train.parquet +3 -0
  30. alltheanimals_2020/red_caps-train.parquet +3 -0
  31. amateurphotography/red_caps-train.parquet +3 -0
  32. amateurphotography_2017/red_caps-train.parquet +3 -0
  33. amateurphotography_2018/red_caps-train.parquet +3 -0
  34. amateurphotography_2019/red_caps-train.parquet +3 -0
  35. amateurphotography_2020/red_caps-train.parquet +3 -0
  36. amateurroomporn/red_caps-train.parquet +3 -0
  37. amateurroomporn_2017/red_caps-train.parquet +3 -0
  38. amateurroomporn_2018/red_caps-train.parquet +3 -0
  39. amateurroomporn_2019/red_caps-train.parquet +3 -0
  40. amateurroomporn_2020/red_caps-train.parquet +3 -0
  41. animalporn/red_caps-train.parquet +3 -0
  42. animalporn_2017/red_caps-train.parquet +3 -0
  43. animalporn_2018/red_caps-train.parquet +3 -0
  44. animalporn_2019/red_caps-train.parquet +3 -0
  45. animalporn_2020/red_caps-train.parquet +3 -0
  46. antiques/red_caps-train.parquet +3 -0
  47. antiques_2017/red_caps-train.parquet +3 -0
  48. antiques_2018/red_caps-train.parquet +3 -0
  49. antiques_2019/red_caps-train.parquet +3 -0
  50. antiques_2020/red_caps-train.parquet +3 -0
README.md DELETED
@@ -1,857 +0,0 @@
---
annotations_creators:
- found
language_creators:
- found
language:
- en
license:
- cc-by-4.0
multilinguality:
- monolingual
size_categories:
- 10M<n<100M
source_datasets:
- original
task_categories:
- image-to-text
task_ids:
- image-captioning
paperswithcode_id: redcaps
pretty_name: RedCaps
dataset_info:
  features:
  - name: image_id
    dtype: string
  - name: author
    dtype: string
  - name: image_url
    dtype: string
  - name: raw_caption
    dtype: string
  - name: caption
    dtype: string
  - name: subreddit
    dtype:
      class_label:
        names:
          0: abandonedporn
          1: abandoned
          2: absoluteunits
          3: airplants
          4: alltheanimals
          5: amateurphotography
          6: amateurroomporn
          7: animalporn
          8: antiques
          9: antkeeping
          10: ants
          11: aquariums
          12: architectureporn
          13: artefactporn
          14: astronomy
          15: astrophotography
          16: australiancattledog
          17: australianshepherd
          18: autumnporn
          19: averagebattlestations
          20: awwducational
          21: awwnverts
          22: axolotls
          23: backpacking
          24: backyardchickens
          25: baking
          26: ballpython
          27: barista
          28: bassfishing
          29: battlestations
          30: bbq
          31: beagle
          32: beardeddragons
          33: beekeeping
          34: beerandpizza
          35: beerporn
          36: beerwithaview
          37: beginnerwoodworking
          38: bengalcats
          39: bento
          40: bernesemountaindogs
          41: berries
          42: bettafish
          43: bicycling
          44: bikecommuting
          45: birding
          46: birdphotography
          47: birdpics
          48: birdsofprey
          49: birds
          50: blackcats
          51: blacksmith
          52: bladesmith
          53: boatporn
          54: bonsai
          55: bookporn
          56: bookshelf
          57: bordercollie
          58: bostonterrier
          59: botanicalporn
          60: breadit
          61: breakfastfood
          62: breakfast
          63: bridgeporn
          64: brochet
          65: budgetfood
          66: budgies
          67: bulldogs
          68: burgers
          69: butterflies
          70: cabinporn
          71: cactus
          72: cakedecorating
          73: cakewin
          74: cameras
          75: campingandhiking
          76: camping
          77: carnivorousplants
          78: carpentry
          79: carporn
          80: cassetteculture
          81: castiron
          82: castles
          83: casualknitting
          84: catpictures
          85: cats
          86: ceramics
          87: chameleons
          88: charcuterie
          89: cheesemaking
          90: cheese
          91: chefit
          92: chefknives
          93: chickens
          94: chihuahua
          95: chinchilla
          96: chinesefood
          97: churchporn
          98: cider
          99: cityporn
          100: classiccars
          101: cockatiel
          102: cocktails
          103: coffeestations
          104: coins
          105: cookiedecorating
          106: corgi
          107: cornsnakes
          108: cozyplaces
          109: crafts
          110: crestedgecko
          111: crochet
          112: crossstitch
          113: crows
          114: crystals
          115: cupcakes
          116: dachshund
          117: damnthatsinteresting
          118: desertporn
          119: designmyroom
          120: desksetup
          121: dessertporn
          122: dessert
          123: diy
          124: dobermanpinscher
          125: doggos
          126: dogpictures
          127: drunkencookery
          128: duck
          129: dumpsterdiving
          130: earthporn
          131: eatsandwiches
          132: embroidery
          133: entomology
          134: equestrian
          135: espresso
          136: exposureporn
          137: eyebleach
          138: f1porn
          139: farming
          140: femalelivingspace
          141: fermentation
          142: ferrets
          143: fireporn
          144: fishing
          145: fish
          146: flowers
          147: flyfishing
          148: foodporn
          149: food
          150: foraging
          151: fossilporn
          152: fountainpens
          153: foxes
          154: frenchbulldogs
          155: frogs
          156: gardening
          157: gardenwild
          158: geckos
          159: gemstones
          160: geologyporn
          161: germanshepherds
          162: glutenfree
          163: goldenretrievers
          164: goldfish
          165: gold
          166: greatpyrenees
          167: grilledcheese
          168: grilling
          169: guineapigs
          170: gunporn
          171: guns
          172: hamsters
          173: handtools
          174: healthyfood
          175: hedgehog
          176: helicopters
          177: herpetology
          178: hiking
          179: homestead
          180: horses
          181: hotpeppers
          182: houseplants
          183: houseporn
          184: husky
          185: icecreamery
          186: indoorgarden
          187: infrastructureporn
          188: insects
          189: instantpot
          190: interestingasfuck
          191: interiordesign
          192: itookapicture
          193: jellyfish
          194: jewelry
          195: kayakfishing
          196: kayaking
          197: ketorecipes
          198: knifeporn
          199: knives
          200: labrador
          201: leathercraft
          202: leopardgeckos
          203: lizards
          204: lookatmydog
          205: macarons
          206: machineporn
          207: macroporn
          208: malelivingspace
          209: mead
          210: mealprepsunday
          211: mechanicalkeyboards
          212: mechanicalpencils
          213: melts
          214: metalworking
          215: microgreens
          216: microporn
          217: mildlyinteresting
          218: mineralporn
          219: monitors
          220: monstera
          221: mostbeautiful
          222: motorcycleporn
          223: muglife
          224: mushroomgrowers
          225: mushroomporn
          226: mushrooms
          227: mycology
          228: natureisfuckinglit
          229: natureporn
          230: nebelung
          231: orchids
          232: otters
          233: outdoors
          234: owls
          235: parrots
          236: pelletgrills
          237: pens
          238: perfectfit
          239: permaculture
          240: photocritique
          241: photographs
          242: pics
          243: pitbulls
          244: pizza
          245: plantbaseddiet
          246: plantedtank
          247: plantsandpots
          248: plants
          249: pomeranians
          250: pottery
          251: pourpainting
          252: proplifting
          253: pugs
          254: pug
          255: quilting
          256: rabbits
          257: ramen
          258: rarepuppers
          259: reeftank
          260: reptiles
          261: resincasting
          262: roomporn
          263: roses
          264: rottweiler
          265: ruralporn
          266: sailing
          267: salsasnobs
          268: samoyeds
          269: savagegarden
          270: scotch
          271: seaporn
          272: seriouseats
          273: sewing
          274: sharks
          275: shiba
          276: shihtzu
          277: shrimptank
          278: siamesecats
          279: siberiancats
          280: silverbugs
          281: skyporn
          282: sloths
          283: smoking
          284: snails
          285: snakes
          286: sneakers
          287: sneks
          288: somethingimade
          289: soup
          290: sourdough
          291: sousvide
          292: spaceporn
          293: spicy
          294: spiderbro
          295: spiders
          296: squirrels
          297: steak
          298: streetphotography
          299: succulents
          300: superbowl
          301: supermodelcats
          302: sushi
          303: tacos
          304: tarantulas
          305: tastyfood
          306: teaporn
          307: tea
          308: tequila
          309: terrariums
          310: thedepthsbelow
          311: thriftstorehauls
          312: tinyanimalsonfingers
          313: tonightsdinner
          314: toolporn
          315: tools
          316: torties
          317: tortoise
          318: tractors
          319: trailrunning
          320: trains
          321: trucks
          322: turtle
          323: underwaterphotography
          324: upcycling
          325: urbanexploration
          326: urbanhell
          327: veganfoodporn
          328: veganrecipes
          329: vegetablegardening
          330: vegetarian
          331: villageporn
          332: vintageaudio
          333: vintage
          334: vinyl
          335: volumeeating
          336: watches
          337: waterporn
          338: weatherporn
          339: wewantplates
          340: wildernessbackpacking
          341: wildlifephotography
          342: wine
          343: winterporn
          344: woodcarving
          345: woodworking
          346: workbenches
          347: workspaces
          348: yarnaddicts
          349: zerowaste
  - name: score
    dtype: int32
  - name: created_utc
    dtype: timestamp[s, tz=UTC]
  - name: permalink
    dtype: string
  - name: crosspost_parents
    sequence: string
  config_name: all
  splits:
  - name: train
    num_bytes: 3378544525
    num_examples: 12011121
  download_size: 1061908181
  dataset_size: 3378544525
---

# Dataset Card for RedCaps

## Table of Contents
- [Table of Contents](#table-of-contents)
- [Dataset Description](#dataset-description)
  - [Dataset Summary](#dataset-summary)
  - [Dataset Preprocessing](#dataset-preprocessing)
  - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
  - [Languages](#languages)
- [Dataset Structure](#dataset-structure)
  - [Data Instances](#data-instances)
  - [Data Fields](#data-fields)
  - [Data Splits](#data-splits)
- [Dataset Creation](#dataset-creation)
  - [Curation Rationale](#curation-rationale)
  - [Source Data](#source-data)
  - [Annotations](#annotations)
  - [Personal and Sensitive Information](#personal-and-sensitive-information)
- [Considerations for Using the Data](#considerations-for-using-the-data)
  - [Social Impact of Dataset](#social-impact-of-dataset)
  - [Discussion of Biases](#discussion-of-biases)
  - [Other Known Limitations](#other-known-limitations)
- [Additional Information](#additional-information)
  - [Dataset Curators](#dataset-curators)
  - [Licensing Information](#licensing-information)
  - [Citation Information](#citation-information)
  - [Contributions](#contributions)

## Dataset Description

- **Homepage:** [RedCaps homepage](https://redcaps.xyz/)
- **Repository:** [RedCaps repository](https://github.com/redcaps-dataset/redcaps-downloader)
- **Paper:** [RedCaps: web-curated image-text data created by the people, for the people](https://arxiv.org/abs/2111.11431)
- **Leaderboard:**
- **Point of Contact:** [Karan Desai](mailto:kdexd@umich.edu)

### Dataset Summary

RedCaps is a large-scale dataset of 12M image-text pairs collected from Reddit. Images and captions from Reddit depict and describe a wide variety of objects and scenes. The data is collected from a manually curated set of subreddits (350 total), which give coarse image labels and allow steering of the dataset composition without labeling individual instances. RedCaps data is created *by the people, for the people* – it contains everyday things that users like to share on social media, for example hobbies (r/crafts) and pets (r/shiba). Captions often contain specific and fine-grained descriptions (northern cardinal, taj mahal). Subreddit names provide relevant image labels (r/shiba) even when captions may not (mlem!), and sometimes may group many visually unrelated images through a common semantic meaning (r/perfectfit).

### Dataset Preprocessing

This dataset doesn't download the images locally by default. Instead, it exposes URLs to the images. To fetch the images, use the following code:

```python
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import io
import urllib.request

import PIL.Image

from datasets import load_dataset
from datasets.utils.file_utils import get_datasets_user_agent


USER_AGENT = get_datasets_user_agent()


def fetch_single_image(image_url, timeout=None, retries=0):
    # Download one image, returning None if every attempt fails.
    image = None
    for _ in range(retries + 1):
        try:
            request = urllib.request.Request(
                image_url,
                data=None,
                headers={"user-agent": USER_AGENT},
            )
            with urllib.request.urlopen(request, timeout=timeout) as req:
                image = PIL.Image.open(io.BytesIO(req.read()))
            break
        except Exception:
            image = None
    return image


def fetch_images(batch, num_threads, timeout=None, retries=0):
    # Fetch one batch of images concurrently with a thread pool.
    fetch_single_image_with_args = partial(fetch_single_image, timeout=timeout, retries=retries)
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        batch["image"] = list(executor.map(fetch_single_image_with_args, batch["image_url"]))
    return batch


num_threads = 20
dset = load_dataset("red_caps", "rabbits_2017")
dset = dset.map(fetch_images, batched=True, batch_size=100, fn_kwargs={"num_threads": num_threads})
```

Some image links point to more than one image. You can process and download those as follows:

```python
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import io
import os
import re
import urllib.request

import PIL.Image

import datasets
from datasets import load_dataset
from datasets.utils.file_utils import get_datasets_user_agent


USER_AGENT = get_datasets_user_agent()


def fetch_single_image(image_url, timeout=None, retries=0):
    # Download one image, returning None if every attempt fails.
    image = None
    for _ in range(retries + 1):
        try:
            request = urllib.request.Request(
                image_url,
                data=None,
                headers={"user-agent": USER_AGENT},
            )
            with urllib.request.urlopen(request, timeout=timeout) as req:
                image = PIL.Image.open(io.BytesIO(req.read()))
            break
        except Exception:
            image = None
    return image


def fetch_images(batch, num_threads, timeout=None, retries=0):
    # Each example now holds a list of URLs, so fetch a list of images per example.
    fetch_single_image_with_args = partial(fetch_single_image, timeout=timeout, retries=retries)
    with ThreadPoolExecutor(max_workers=num_threads) as executor:
        batch["image"] = list(
            executor.map(
                lambda image_urls: [fetch_single_image_with_args(image_url) for image_url in image_urls],
                batch["image_url"],
            )
        )
    return batch


def process_image_urls(batch):
    # Split multi-image links (e.g. comma-separated imgur galleries) into clean per-image URLs.
    processed_batch_image_urls = []
    for image_url in batch["image_url"]:
        processed_example_image_urls = []
        image_url_splits = re.findall(r"http\S+", image_url)
        for image_url_split in image_url_splits:
            if "imgur" in image_url_split and "," in image_url_split:
                for image_url_part in image_url_split.split(","):
                    if not image_url_part:
                        continue
                    image_url_part = image_url_part.strip()
                    root, ext = os.path.splitext(image_url_part)
                    if not root.startswith("http"):
                        root = "http://i.imgur.com/" + root
                    root = root.split("#")[0]
                    if not ext:
                        ext = ".jpg"
                    ext = re.split(r"[?%]", ext)[0]
                    image_url_part = root + ext
                    processed_example_image_urls.append(image_url_part)
            else:
                processed_example_image_urls.append(image_url_split)
        processed_batch_image_urls.append(processed_example_image_urls)
    batch["image_url"] = processed_batch_image_urls
    return batch


dset = load_dataset("red_caps", "rabbits_2017")
dset = dset.map(process_image_urls, batched=True, num_proc=4)
features = dset["train"].features.copy()
features["image"] = datasets.Sequence(datasets.Image())
num_threads = 20
dset = dset.map(fetch_images, batched=True, batch_size=100, features=features, fn_kwargs={"num_threads": num_threads})
```

Note that in the above code, we use the `datasets.Sequence` feature to represent a list of images for the multi-image links.
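
Downloads can fail, so the `image` column may contain `None` entries (or lists containing `None` in the multi-image case). A minimal follow-up sketch, assuming `dset` comes from the single-image snippet above, that drops such rows:

```python
# Hypothetical cleanup step (not part of the original card): keep only
# examples whose image was successfully fetched.
dset = dset.filter(lambda example: example["image"] is not None)
```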

### Supported Tasks and Leaderboards

From the paper:
> We have used our dataset to train deep neural networks that perform image captioning, and that learn transferable visual representations for a variety of downstream visual recognition tasks (image classification, object detection, instance segmentation).

> We anticipate that the dataset could be used for a variety of vision-and-language (V&L) tasks, such as image or text retrieval or text-to-image synthesis.

### Languages

All of the subreddits in RedCaps use English as their primary language.

## Dataset Structure

### Data Instances

Each instance in RedCaps represents a single Reddit image post:

```
{
  'image_id': 'bpzj7r',
  'author': 'djasz1',
  'image_url': 'https://i.redd.it/ho0wntksivy21.jpg',
  'raw_caption': 'Found on a friend’s property in the Keys FL. She is now happily living in my house.',
  'caption': "found on a friend's property in the keys fl. she is now happily living in my house.",
  'subreddit': 3,
  'score': 72,
  'created_utc': datetime.datetime(2019, 5, 18, 1, 36, 41),
  'permalink': '/r/airplants/comments/bpzj7r/found_on_a_friends_property_in_the_keys_fl_she_is/',
  'crosspost_parents': None
}
```
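
The example above is what indexing into the loaded dataset returns. A minimal sketch, assuming the full `all` configuration declared in the YAML metadata:

```python
from datasets import load_dataset

# Load the full configuration and inspect the first instance.
dset = load_dataset("red_caps", "all", split="train")
print(dset[0])
```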

### Data Fields

- `image_id`: Unique alphanumeric ID of the image post (assigned by Reddit).
- `author`: Reddit username of the image post author.
- `image_url`: Static URL for downloading the image associated with the post.
- `raw_caption`: Textual description of the image, written by the post author.
- `caption`: Cleaned version of `raw_caption` by us (see Q35).
- `subreddit`: Name of the subreddit where the post was submitted.
- `score`: Net upvotes (discounting downvotes) received by the image post. This field is equal to `None` if the image post is a crosspost.
- `created_utc`: Integer time epoch (in UTC) when the post was submitted to Reddit.
- `permalink`: Partial URL of the Reddit post (https://reddit.com/<permalink>).
- `crosspost_parents`: List of parent posts. This field is optional.
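
In the loaded dataset, `subreddit` is stored as a `ClassLabel` integer (as declared in the YAML metadata above). A hedged sketch, reusing `dset` from the snippet above, that maps it back to the subreddit name:

```python
# Decode the integer subreddit label into its human-readable name.
subreddit_feature = dset.features["subreddit"]
print(subreddit_feature.int2str(dset[0]["subreddit"]))
```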


### Data Splits

All the data is contained in the training set. The training set has nearly 12M (12,011,121) instances.

From the paper:
> We intend our dataset to be primarily used for pre-training with one or more specific downstream task(s) in mind. Hence, all instances in our dataset would be used for training while the validation split is derived from downstream task(s). If users require a validation split, we recommend sampling it such that it follows the same subreddit distribution as the entire dataset.
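
One way to follow that recommendation is stratified sampling on the `subreddit` label. A minimal sketch, assuming `dset` is the train split loaded earlier and an arbitrary held-out fraction of 5%:

```python
# Stratify on the ClassLabel column so the held-out split preserves
# the subreddit distribution of the full dataset.
splits = dset.train_test_split(test_size=0.05, stratify_by_column="subreddit", seed=0)
train_dset, val_dset = splits["train"], splits["test"]
```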

## Dataset Creation

### Curation Rationale

From the paper:
> Large datasets of image-text pairs are widely used for pre-training generic representations that transfer to a variety of downstream vision and vision-and-language tasks. Existing public datasets of this kind were curated from search engine results (SBU Captions [1]) or HTML alt-text from arbitrary web pages (Conceptual Captions [2, 31]). They performed complex data filtering to deal with noisy web data. Due to aggressive filtering, their data collection is inefficient and diversity is artificially suppressed. We argue that the quality of data depends on its source, and the human intent behind its creation. In this work, we explore Reddit – a social media platform, for curating high quality data. We introduce RedCaps – a large dataset of 12M image-text pairs from Reddit. While we expect the use-cases of RedCaps to be similar to existing datasets, we discuss how Reddit as a data source leads to fast and lightweight collection, better data quality, lets us easily steer the data distribution, and facilitates ethically responsible data curation.

### Source Data

#### Initial Data Collection and Normalization

From the paper:
> **Data Collection Pipeline**
> Reddit's uniform structure allows us to parallelize data collection as independent tasks – each task involves collecting posts submitted to a single subreddit in one year. Our collection pipeline has three steps: (1) subreddit selection, (2) image post filtering, and (3) caption cleaning.
>
> **Step 1**. Subreddit selection: We collect data from a manually curated set of subreddits. Subreddits have their own rules, community norms, and moderators, so curating subreddits allows us to steer the dataset's composition without annotating individual instances. We select subreddits with a high volume of image posts, where images tend to be photographs (rather than memes, drawings, screenshots, etc) and post titles tend to describe image content (rather than making jokes, political commentary, etc). We do not select any NSFW, banned, or quarantined subreddits. We want to minimize the number of people that appear in RedCaps, so we omit subreddits whose primary purpose is to share or comment on images of people (such as celebrity pics or user selfies). We choose subreddits focused on general photography (r/pics, r/itookapicture), animals (r/axolotls, r/birdsofprey, r/dachshund), plants (r/roses, r/succulents), objects (r/classiccars, r/trains, r/mechanicalkeyboards), food (r/steak, r/macarons), scenery (r/cityporn, r/desertporn), or activities (r/carpentry, r/kayaking). In total we collect data from 350 subreddits; the full list can be found in Appendix A.
>
> **Step 2**. Image post filtering: We use Pushshift [41] and Reddit [42, 43] APIs to download all image posts submitted to our selected subreddits from 2008–2020. Posts are collected at least six months after their creation to let upvotes stabilize. We only collect posts with images hosted on three domains: Reddit (i.redd.it), Imgur (i.imgur.com), and Flickr (staticflickr.com). Some image posts contain multiple images (gallery posts) – in this case we only collect the first image and associate it with the caption. We discard posts with < 2 upvotes to avoid unappealing content, and we discard posts marked NSFW (by their authors or subreddit moderators) to avoid pornographic or disturbing content.
>
> **Step 3**. Caption cleaning: We expect Reddit post titles to be less noisy than other large-scale sources of image captions such as alt-text [2, 31], so we apply minimal text cleaning. We lowercase captions and use ftfy [44] to remove character accents, emojis, and non-latin characters, following [29, 35, 36]. Then we apply simple pattern matching to discard all sub-strings enclosed in brackets ((.*), [.*]). These sub-strings usually give non-semantic information: original content tags [oc], image resolutions (800x600 px), camera specs (shot with iPhone), self-promotion [Instagram: @user], and other references (link in comments). Finally, like [31] we replace social media handles (words starting with '@') with a [USR] token to protect user privacy and reduce redundancy. Due to such filtering, ≈12K (0.1%) captions in our dataset are empty strings. We do not discard them, as subreddit names alone provide meaningful supervision. Unlike CC-3M or CC-12M that discard captions without nouns or that don't overlap image tags, we do not discard any instances in this step.
>
> Through this pipeline, we collect 13.4M instances from 350 subreddits. Our collection pipeline is less resource-intensive than existing datasets – we do not require webpage crawlers, search engines, or large databases of indexed webpages. RedCaps is easily extensible in the future by selecting more subreddits and collecting posts from future years. Next, we perform additional filtering to mitigate user privacy risks and harmful stereotypes in RedCaps, resulting in a final size of 12M instances.
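
The caption-cleaning step translates into a short text pipeline. A rough sketch of the operations described above (the authors' actual implementation, including their exact patterns, may differ):

```python
import re

import ftfy


def clean_caption(raw_caption: str) -> str:
    # Normalize unicode, then lowercase; the paper's accent/emoji removal is
    # approximated here with ftfy plus an ASCII filter.
    caption = ftfy.fix_text(raw_caption).lower()
    caption = caption.encode("ascii", "ignore").decode()
    # Discard sub-strings enclosed in round or square brackets.
    caption = re.sub(r"\([^)]*\)|\[[^\]]*\]", "", caption)
    # Replace social media handles with a [USR] token.
    caption = re.sub(r"@\S+", "[USR]", caption)
    return re.sub(r"\s+", " ", caption).strip()


print(clean_caption("Northern Cardinal [OC] captured by @birdfan (800x600 px)"))
# -> "northern cardinal captured by [USR]"
```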

#### Who are the source language producers?

Reddit is the singular data source for RedCaps.

### Annotations

#### Annotation process

The dataset is built using a fully automatic data collection pipeline which doesn't require any human annotators.

#### Who are the annotators?

The annotation process doesn't require any human annotators.

### Personal and Sensitive Information

From the paper:
> **Does the dataset relate to people?**
> The dataset pertains to people in that people wrote the captions and posted images to Reddit that we curate in RedCaps. We made specific design choices while curating RedCaps to avoid large quantities of images containing people:
> (a) We collect data from manually curated subreddits, most of which primarily pertain to animals, objects, places, or activities. We exclude all subreddits whose primary purpose is to share and describe images of people (such as celebrity photos or user selfies).
> (b) We use an off-the-shelf face detector to find and remove images with potential presence of human faces. We manually checked 50K random images in RedCaps (Q16) and found 79 images with identifiable human faces – the entire dataset may have ≈19K (0.15%) images with identifiable people. Refer to Section 2.2 in the main paper.

> **Is it possible to identify one or more natural persons, either directly or indirectly (i.e., in combination with other data) from the dataset?**
> Yes, all instances in RedCaps include Reddit usernames of their post authors. This could be used to look up the Reddit user profile, and some Reddit users may have identifying information in their profiles. Some images may contain human faces which could be identified by appearance. However, note that all this information is already public on Reddit, and searching it in RedCaps is no easier than searching directly on Reddit.

> **Were the individuals in question notified about the data collection?**
> No. Reddit users are anonymous by default, and are not required to share their personal contact information (email, phone numbers, etc.). Hence, the only way to notify the authors of RedCaps image posts is by sending them private messages on Reddit. This is practically difficult to do manually, and any attempt to programmatically send a templated message to millions of users would be classified as spam and blocked by Reddit.

> **Did the individuals in question consent to the collection and use of their data?**
> Users did not explicitly consent to the use of their data in our dataset. However, by uploading their data on Reddit, they consent that it would appear on the Reddit platform and will be accessible via the official Reddit API (which we use to collect RedCaps).

> **If consent was obtained, were the consenting individuals provided with a mechanism to revoke their consent in the future or for certain uses?**
> Users have full control over the presence of their data in our dataset. If users wish to revoke their consent, they can delete the underlying Reddit post – it will be automatically removed from RedCaps since we distribute images as URLs. Moreover, we provide an opt-out request form on our dataset website for anybody to request removal of an individual instance if it is potentially harmful (e.g. NSFW, violates privacy, harmful stereotypes, etc.).

## Considerations for Using the Data

### Social Impact of Dataset

From the paper:
> **Has an analysis of the potential impact of the dataset and its use on data subjects (e.g., a data protection impact analysis) been conducted?**
> No.

### Discussion of Biases

From the paper:
> **Harmful Stereotypes**: Another concern with Reddit data is that images or language may represent harmful stereotypes about gender, race, or other characteristics of people [48, 49, 51]. We select only non-NSFW subreddits with active moderation for collecting data. This stands in contrast to less curated uses of Reddit data, such as GPT-2 [35] whose training data includes at least 63K documents from banned or quarantined subreddits which may contain toxic language [53]. We attempt to further reduce harmful stereotypes in two ways:
> * **NSFW images**: We use the InceptionV3 [54] model from [55] to filter images detected as porn or hentai with confidence ≥ 0.9. Similar to face filtering, we estimated the precision of our filtering and the amount of missed detections, shown in Table 1. The model detects 87K images with low precision (∼1%) – most detections are non-NSFW images with pink and beige hues.
> * **Potentially derogatory language**: We filter instances whose captions contain words or phrases from a common blocklist [56]. It is important to note that such coarse filtering might suppress language from marginalized groups reclaiming slurs [51]; however, as RedCaps is not intended to describe people, we believe this is a pragmatic tradeoff to avoid propagating harmful labels.

> **Reddit demographics**: Reddit's user demographics are not representative of the population at large. Compared to US adults, Reddit users skew male (69% vs 49%), young (58% 18-29 years old vs 22%), college educated (36% vs 28%), and politically liberal (41% vs 25%) [57]. Reddit users are predominantly white (63%) [57], and 49% of desktop traffic to Reddit comes from the United States [58]. All of the subreddits in RedCaps use English as their primary language. Taken together, these demographic biases likely also bias the types of objects and places that appear in images on Reddit, and the language used to describe these images. We do not offer explicit countermeasures to these biases, but users of RedCaps should keep in mind that size doesn't guarantee diversity [51]. Subtler issues may also exist, such as imbalanced representation of demographic groups [59] or gender bias in object co-occurrence [60] or language [61]. These are hard to control in internet data, so we release RedCaps with explicit instructions on suitable use-cases; specifically requesting models not be trained to identify people, or make decisions that impact people. We document these instructions and other terms-of-use in a datasheet [45], provided in Appendix G.

> **Does the dataset contain data that, if viewed directly, might be offensive, insulting, threatening, or might otherwise cause anxiety?**
> The scale of RedCaps means that we are unable to verify the contents of all images and captions. However, we have tried to minimize the possibility that RedCaps contains data that might be offensive, insulting, threatening, or might cause anxiety via the following mitigations:
> (a) We manually curate the set of subreddits from which to collect data; we only chose subreddits that are not marked NSFW and which generally contain non-offensive content.
> (b) Within our curated subreddits, we did not include any posts marked NSFW.
> (c) We removed all instances whose captions contained any of the 400 potentially offensive words or phrases. Refer to Section 2.2 in the main paper.
> (d) We remove all instances whose images were flagged NSFW by an off-the-shelf detector. We manually checked 50K random images in RedCaps and found one image containing nudity (exposed buttocks; no identifiable face). Refer to Section 2.2 in the main paper.

> **Does the dataset identify any subpopulations (e.g., by age, gender)?**
> RedCaps does not explicitly identify any subpopulations. Since some images contain people and captions are free-form natural language written by Reddit users, it is possible that some captions may identify people appearing in individual images as part of a subpopulation.

> **Were any ethical review processes conducted (e.g., by an institutional review board)?**
> We did not conduct a formal ethical review process via institutional review boards. However, as described in Section 2.2 of the main paper and Q16, we employed several filtering mechanisms to try and remove instances that could be problematic.

### Other Known Limitations

From the paper:
> **Are there any errors, sources of noise, or redundancies in the dataset?**
> RedCaps is noisy by design since image-text pairs on the internet are noisy and unstructured. Some instances may also have duplicate images and captions – Reddit users may have shared the same image post in multiple subreddits. Such redundancies constitute a very small fraction of the dataset, and should have almost no effect in training large-scale models.

> **Does the dataset contain data that might be considered confidential (e.g., data that is protected by legal privilege or by doctor-patient confidentiality, data that includes the content of individuals' non-public communications)?**
> No, the subreddits included in RedCaps do not cover topics that may be considered confidential. All posts were publicly shared on Reddit prior to inclusion in RedCaps.

## Additional Information

### Dataset Curators

From the paper:
> Four researchers at the University of Michigan (affiliated as of 2021) have created RedCaps: Karan Desai, Gaurav Kaul, Zubin Aysola, and Justin Johnson.

### Licensing Information

The image metadata is licensed under the CC-BY 4.0 license. Additionally, uses of this dataset are subject to the Reddit API terms (https://www.reddit.com/wiki/api-terms) and users must comply with the Reddit User Agreement, Content Policy, and Privacy Policy – all accessible at https://www.redditinc.com/policies.

From the paper:
> RedCaps should only be used for non-commercial research. RedCaps should not be used for any tasks that involve identifying features related to people (facial recognition, gender, age, ethnicity identification, etc.) or make decisions that impact people (mortgages, job applications, criminal sentences; or moderation decisions about user-uploaded data that could result in bans from a website). Any commercial and for-profit uses of RedCaps are restricted – it should not be used to train models that will be deployed in production systems as part of a product offered by businesses or government agencies.

### Citation Information

```bibtex
@misc{desai2021redcaps,
      title={RedCaps: web-curated image-text data created by the people, for the people},
      author={Karan Desai and Gaurav Kaul and Zubin Aysola and Justin Johnson},
      year={2021},
      eprint={2111.11431},
      archivePrefix={arXiv},
      primaryClass={cs.CV}
}
```

### Contributions

Thanks to [@mariosasko](https://github.com/mariosasko) for adding this dataset.
abandoned/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e6df6abf6db5141417dd42d8bacebd7a0d4a781f5a30d3dec46df1cd105ee2f2
size 1061048

abandoned_2017/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8d82b5a9b12500c042e91710b090e2b9230271c567564428b4a7ead1499914f9
size 230591

abandoned_2018/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:450ee82384618dba4584e01bded5ea6fae0116978cd3fbae439db036fd352b7b
size 136388

abandoned_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a55439103b34547626b603ee0871ad72e730130759baa2305107e4510826d47b
size 262244

abandoned_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d90dd3abb717f2353e53b3868231e2f750440d9b5d3da4e8e3cd8ad9bebabb93
size 486952

abandonedporn/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f89239b48aa3d421467eda95ea41f38597f51d4bee6adfbe815e0a9f1e895189
size 9071973

abandonedporn_2017/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0565da1d8643eac1f288949414d0bdd5126fc8041655357f44895f3a6179eb31
size 3615377

abandonedporn_2018/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8256b8ca09df0874733fca886827def5b1ddd15a9b0d6802475a83110b350ffa
size 1292113

abandonedporn_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:00eb782435b39b05b01c6116bb1041c22909c540e1e27ec8ee0220766559ae6e
size 1950966

abandonedporn_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:74ec5298d4464a1bee08c9ef06237f7735d839c8e9a8e8536145b86fec8dbd8c
size 2269242

absoluteunits/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:07a1debcf6eeff62f1fb2e1a2c9a2bdb7cce5e5d77a153e8a11a311803daa53d
size 4171874

absoluteunits_2018/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:bd28d89504bc51ffb322229934dbb9b88dbdb3b9894819cd6bce1bdd6bb07c82
size 1092477

absoluteunits_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6e371818d06a95f04bad50989930f125a28cd896c0d0fb2f6cc16cb1ccdcdb19
size 2302788

absoluteunits_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8ce1f29ad118474e41e21188d592a25aed56e074cd454b331606610ef9c717d3
size 811422

airplants/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:dd4ce7d3c2d36c30a46c061ddfbdc891af31df6cceb7f7daa04dc42b3bb955e1
size 1508954

airplants_2017/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b51b79997fa523055ca664f6fe7ea37aafba4ac1fe3d0ed58ca3ae63e3939565
size 100077

airplants_2018/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f935a20a6545d94398dec84e75b538f4b198831abbe98bb8b0cafccf9c241a6
size 247699

airplants_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:90233666ffcf00afc1b3be2bc5d1fa02b3495fd5617be73e0e1c699d4e70c8b7
size 532957

airplants_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2903ccfaee9cf4746b863f1ee77ddc16f643aea79f9b7fee249907839de89198
size 675504

all/red_caps-train-00000-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b1fe80c447bc5babd100752817f42194ad68b85713813e217f06bc04939640b
size 288020818

all/red_caps-train-00001-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cdf095965a8dd838d0548e147219aa4d091929abebb6695b793fcf4eab776502
size 292738453

all/red_caps-train-00002-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7551794b03cd7a5e3039cffd832e4279fc32fc82529f8a574566ccbc9720614f
size 287309562

all/red_caps-train-00003-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e7ab593020caee9ca82b5f45c0049def95ea2eaf17e140e25fe49ae903d5cef0
size 284710424

all/red_caps-train-00004-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7c81366421a22e414e21272356d85b650090eb1fc3af5c70cad09e4acc0beada
size 312948201

all/red_caps-train-00005-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f4779433a8d630e04a34b472231ebdd5d4ae377a3c576ce36184f41accdb79f7
size 289880571

all/red_caps-train-00006-of-00007.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a3bfc109f5d3f2817a1dea5af6edd25072339bff1a8855a5f0d40d8d9bd34334
size 137887250

alltheanimals/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac8e47ead303f83d064b390f465e427c12d00e85499025f4a11b7acd328b31e2
size 186049

alltheanimals_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:82ac8937707aaade2304aaebc178a462e686f0602648d378f2ec5a89185f1b18
size 73029

alltheanimals_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:62be926efb181dd476a9f9a9d6889409c26352cdb591c07a746742e7aaf1e956
size 126967

amateurphotography/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0df6e95c82e6e6a5f8acf853075657e77daa189ab556d192dae0e5e094039c42
size 2059239

amateurphotography_2017/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2c8bed091b73f2a8cfb2cac467b028ede4033ef3a566ff01b95409edd62f47c3
size 453532

amateurphotography_2018/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8ff173313e264be663f8bb4a717bde55b37699cfb588382467e0707665e9d4bc
size 304116

amateurphotography_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:99d5e765660b5945451db760e0346798e1e061af63764eaadfa0ba39625d0c31
size 475919

amateurphotography_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:15abaf034a21ec61f28c841fc41065e841c7d3e81577581435d1745ab2a02c69
size 876001

amateurroomporn/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2afb91cefc89f3d784657d7d8260825b545227c911521f4b65ef7a56457ad2a3
size 1830175

amateurroomporn_2017/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b6b489e61b0623fa5316acb3f030e87147f6ede3e50500a01ff028d63cfcb5c0
size 246399

amateurroomporn_2018/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b8fd2795e7763e9a5e2e9282058f8c3780dcd08e357f5e2f03c77b1621bcbc63
size 386159

amateurroomporn_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c6a4f472d89c7c016cbbf53d87df0c1481e8dab8d4ff117233a12745d5231b71
size 537587

amateurroomporn_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:727eaecf210714e6de5565b501fcd752df8fc4ccdf613c735f0ef75f9ec8c5eb
size 717199

animalporn/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:01b66d3b36a24b17f62e57960bf5ded6f7730f9b091ff2adce75dfb8a1c67c25
size 2354869

animalporn_2017/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:405891d513e59a5b5d5e6f29b564da5ff6a0d0b8762ce2c837c204effb5e647f
size 1445452

animalporn_2018/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b66b005ebe801df478a023beb839775d675b9116579544cf291450a0cc7ed0bb
size 185883

animalporn_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a233b40efa65356f2cdfe3adc4f721102bee2c86a49782ff0308393b5595bad5
size 327747

animalporn_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:912536bb7583d10d6ddd581f3724f743f194ad6b58e05a150b451b3ef73b3e7f
size 449511

antiques/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a8a5bbf0a0862c3df27200d22a860635aa748a5c422f597d97b0a5479993158
size 3697417

antiques_2017/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f246f61ac658b674b00b5051c587a60d641326a41c6ebac6e029b15dc45ef0ab
size 1006762

antiques_2018/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:86ce2f4a7c5d4fd30d4903d1671cad71ce123c205d799bce8942180f4a1f16da
size 528105

antiques_2019/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5f2bf0c6f27dfa56c3f2f85d84b78734b06cecd92fcc3d6244da43d6f0395f08
size 801562

antiques_2020/red_caps-train.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7e21c0144d29b19f7e4732e5aaeb2a47fcee78817c77cb15c0392ca67942b321
size 1414616