xiaohk committed
Commit 91021eb
1 Parent(s): a9dd349

Update loading script

Files changed (2):
  1. README.md +3 -3
  2. diffusiondb.py +165 -27
README.md CHANGED
@@ -89,7 +89,7 @@ task_ids:
 
 ### Dataset Summary
 
-DiffusionDB is the first large-scale text-to-image prompt dataset. It contains 14 million images generated by Stable Diffusion using prompts and hyperparameters specified by real users.
+DiffusionDB is the first large-scale text-to-image prompt dataset. It contains **14 million** images generated by Stable Diffusion using prompts and hyperparameters specified by real users.
 
 DiffusionDB is publicly available at [🤗 Hugging Face Dataset](https://huggingface.co/datasets/poloclub/diffusiondb).
 
@@ -247,8 +247,8 @@ You can use the Hugging Face [`Datasets`](https://huggingface.co/docs/datasets/q
 import numpy as np
 from datasets import load_dataset
 
-# Load the dataset with the `random_1k` subset
-dataset = load_dataset('poloclub/diffusiondb', 'random_1k')
+# Load the dataset with the `random_1k [large]` subset
+dataset = load_dataset('poloclub/diffusiondb', 'random_1k [large]')
 ```
 
 #### Method 2. Use the PoloClub Downloader
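
For reference, a minimal usage sketch of the renamed configs introduced by this commit; the config names are taken from the new `BUILDER_CONFIGS` in `diffusiondb.py` below, where the `[2m]` suffix selects the original 2M-image set and `[large]` selects DiffusionDB Large (14 million images):

```python
from datasets import load_dataset

# 1k random images from the original 2M subset (the new default config name)
dataset_2m = load_dataset('poloclub/diffusiondb', 'random_1k [2m]')

# The same sample size, but drawn from DiffusionDB Large (14M images)
dataset_large = load_dataset('poloclub/diffusiondb', 'random_1k [large]')
```

Since `random_1k [2m]` is also the new `DEFAULT_CONFIG_NAME`, omitting the config argument loads that subset.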
diffusiondb.py CHANGED
@@ -2,6 +2,7 @@
 # MIT License
 """Loading script for DiffusionDB."""
 
+import re
 import numpy as np
 import pandas as pd
 
@@ -10,6 +11,8 @@ from os.path import join, basename
 from huggingface_hub import hf_hub_url
 
 import datasets
+import pyarrow as pa
+import pyarrow.parquet as pq
 
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -34,22 +37,40 @@ designing human-AI interaction tools to help users more easily use these models.
 
 _HOMEPAGE = "https://poloclub.github.io/diffusiondb"
 _LICENSE = "CC0 1.0"
-_VERSION = datasets.Version("0.9.0")
+_VERSION = datasets.Version("0.9.1")
 
 # Programmatically generate the URLs for different parts
 # hf_hub_url() provides a more flexible way to resolve the file URLs
 # https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-000001.zip
 _URLS = {}
+_URLS_LARGE = {}
 _PART_IDS = range(1, 2001)
+_PART_IDS_LARGE = range(1, 14001)
 
 for i in _PART_IDS:
     _URLS[i] = hf_hub_url(
         "datasets/poloclub/diffusiondb", filename=f"images/part-{i:06}.zip"
     )
 
+for i in _PART_IDS_LARGE:
+    if i < 10001:
+        _URLS_LARGE[i] = hf_hub_url(
+            "datasets/poloclub/diffusiondb",
+            filename=f"diffusiondb-large-part-1/part-{i:06}.zip",
+        )
+    else:
+        _URLS_LARGE[i] = hf_hub_url(
+            "datasets/poloclub/diffusiondb",
+            filename=f"diffusiondb-large-part-2/part-{i:06}.zip",
+        )
+
 # Add the metadata parquet URL as well
 _URLS["metadata"] = hf_hub_url(
-    "datasets/poloclub/diffusiondb", filename=f"metadata.parquet"
+    "datasets/poloclub/diffusiondb", filename="metadata.parquet"
+)
+
+_URLS_LARGE["metadata"] = hf_hub_url(
+    "datasets/poloclub/diffusiondb", filename="metadata-large.parquet"
 )
 
 _SAMPLER_DICT = {
@@ -68,14 +89,16 @@ _SAMPLER_DICT = {
 class DiffusionDBConfig(datasets.BuilderConfig):
     """BuilderConfig for DiffusionDB."""
 
-    def __init__(self, part_ids, **kwargs):
+    def __init__(self, part_ids, is_large, **kwargs):
         """BuilderConfig for DiffusionDB.
         Args:
             part_ids([int]): A list of part_ids.
+            is_large(bool): If downloading data from DiffusionDB Large (14 million)
             **kwargs: keyword arguments forwarded to super.
         """
         super(DiffusionDBConfig, self).__init__(version=_VERSION, **kwargs)
         self.part_ids = part_ids
+        self.is_large = is_large
 
 
 class DiffusionDB(datasets.GeneratorBasedBuilder):
@@ -87,11 +110,54 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
     # as the config key)
     for num_k in [1, 5, 10, 50, 100, 500, 1000]:
         for sampling in ["first", "random"]:
-            num_k_str = f"{num_k}k" if num_k < 1000 else f"{num_k // 1000}m"
+            for is_large in [False, True]:
+                num_k_str = f"{num_k}k" if num_k < 1000 else f"{num_k // 1000}m"
+                subset_str = " [large]" if is_large else " [2m]"
+
+                if sampling == "random":
+                    # Name the config
+                    cur_name = "random_" + num_k_str + subset_str
+
+                    # Add a short description for each config
+                    cur_description = (
+                        f"Random {num_k_str} images with their prompts and parameters"
+                    )
+
+                    # Sample part_ids
+                    total_part_ids = _PART_IDS_LARGE if is_large else _PART_IDS
+                    part_ids = np.random.choice(
+                        total_part_ids, num_k, replace=False
+                    ).tolist()
+                else:
+                    # Name the config
+                    cur_name = "first_" + num_k_str + subset_str
+
+                    # Add a short description for each config
+                    cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters"
+
+                    # Sample part_ids
+                    total_part_ids = _PART_IDS_LARGE if is_large else _PART_IDS
+                    part_ids = total_part_ids[1 : num_k + 1]
+
+                # Create configs
+                BUILDER_CONFIGS.append(
+                    DiffusionDBConfig(
+                        name=cur_name,
+                        part_ids=part_ids,
+                        is_large=is_large,
+                        description=cur_description,
+                    ),
+                )
+
+    # Add few more options for Large only
+    for num_k in [5000, 10000]:
+        for sampling in ["first", "random"]:
+            num_k_str = f"{num_k // 1000}m"
+            subset_str = " [large]"
 
             if sampling == "random":
                 # Name the config
-                cur_name = "random_" + num_k_str
+                cur_name = "random_" + num_k_str + subset_str
 
                 # Add a short description for each config
                 cur_description = (
@@ -99,31 +165,46 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
                 )
 
                 # Sample part_ids
-                part_ids = np.random.choice(_PART_IDS, num_k, replace=False).tolist()
+                total_part_ids = _PART_IDS_LARGE
+                part_ids = np.random.choice(
+                    total_part_ids, num_k, replace=False
+                ).tolist()
             else:
                 # Name the config
-                cur_name = "first_" + num_k_str
+                cur_name = "first_" + num_k_str + subset_str
 
                 # Add a short description for each config
                 cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters"
 
                 # Sample part_ids
-                part_ids = _PART_IDS[1 : num_k + 1]
+                total_part_ids = _PART_IDS_LARGE
+                part_ids = total_part_ids[1 : num_k + 1]
 
             # Create configs
             BUILDER_CONFIGS.append(
                 DiffusionDBConfig(
                     name=cur_name,
                     part_ids=part_ids,
+                    is_large=True,
                     description=cur_description,
                 ),
             )
 
-    # For the 2k option, random sample and first parts are the same
+    # Need to manually add all (2m) and all (large)
    BUILDER_CONFIGS.append(
        DiffusionDBConfig(
-            name="all",
+            name="all [2m]",
            part_ids=_PART_IDS,
+            is_large=False,
+            description="All images with their prompts and parameters",
+        ),
+    )
+
+    BUILDER_CONFIGS.append(
+        DiffusionDBConfig(
+            name="all [large]",
+            part_ids=_PART_IDS_LARGE,
+            is_large=True,
            description="All images with their prompts and parameters",
        ),
    )
@@ -131,28 +212,44 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
     # We also prove a text-only option, which loads the meatadata parquet file
     BUILDER_CONFIGS.append(
         DiffusionDBConfig(
-            name="text_only",
+            name="text_only [2m]",
             part_ids=[],
+            is_large=False,
+            description="Only include all prompts and parameters (no image)",
+        ),
+    )
+
+    BUILDER_CONFIGS.append(
+        DiffusionDBConfig(
+            name="text_only [large]",
+            part_ids=[],
+            is_large=True,
             description="Only include all prompts and parameters (no image)",
         ),
     )
 
     # Default to only load 1k random images
-    DEFAULT_CONFIG_NAME = "random_1k"
+    DEFAULT_CONFIG_NAME = "random_1k [2m]"
 
     def _info(self):
         """Specify the information of DiffusionDB."""
 
-        if self.config.name == "text_only":
+        if "text_only" in self.config.name:
             features = datasets.Features(
                 {
                     "image_name": datasets.Value("string"),
                     "prompt": datasets.Value("string"),
-                    "part_id": datasets.Value("int64"),
-                    "seed": datasets.Value("int64"),
-                    "step": datasets.Value("int64"),
+                    "part_id": datasets.Value("uint16"),
+                    "seed": datasets.Value("uint32"),
+                    "step": datasets.Value("uint16"),
                     "cfg": datasets.Value("float32"),
                     "sampler": datasets.Value("string"),
+                    "width": datasets.Value("uint16"),
+                    "height": datasets.Value("uint16"),
+                    "user_name": datasets.Value("string"),
+                    "timestamp": datasets.Value("timestamp[us, tz=UTC]"),
+                    "image_nsfw": datasets.Value("float32"),
+                    "prompt_nsfw": datasets.Value("float32"),
                 },
             )
 
@@ -161,10 +258,16 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
                 {
                     "image": datasets.Image(),
                     "prompt": datasets.Value("string"),
-                    "seed": datasets.Value("int64"),
-                    "step": datasets.Value("int64"),
+                    "seed": datasets.Value("uint32"),
+                    "step": datasets.Value("uint16"),
                     "cfg": datasets.Value("float32"),
                     "sampler": datasets.Value("string"),
+                    "width": datasets.Value("uint16"),
+                    "height": datasets.Value("uint16"),
+                    "user_name": datasets.Value("string"),
+                    "timestamp": datasets.Value("timestamp[us, tz=UTC]"),
+                    "image_nsfw": datasets.Value("float32"),
+                    "prompt_nsfw": datasets.Value("float32"),
                 },
             )
 
@@ -192,17 +295,21 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
         data_dirs = []
         json_paths = []
 
+        # Resolve the urls
+        if self.config.is_large:
+            urls = _URLS_LARGE
+        else:
+            urls = _URLS
+
         for cur_part_id in self.config.part_ids:
-            cur_url = _URLS[cur_part_id]
+            cur_url = urls[cur_part_id]
             data_dir = dl_manager.download_and_extract(cur_url)
 
             data_dirs.append(data_dir)
             json_paths.append(join(data_dir, f"part-{cur_part_id:06}.json"))
 
-        # If we are in text_only mode, we only need to download the parquet file
-        # For convenience, we save the parquet path in `data_dirs`
-        if self.config.name == "text_only":
-            data_dirs = [dl_manager.download(_URLS["metadata"])]
+        # Also download the metadata table
+        metadata_path = dl_manager.download(urls["metadata"])
 
         return [
             datasets.SplitGenerator(
@@ -211,19 +318,20 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
                 gen_kwargs={
                     "data_dirs": data_dirs,
                     "json_paths": json_paths,
+                    "metadata_path": metadata_path,
                 },
             ),
         ]
 
-    def _generate_examples(self, data_dirs, json_paths):
+    def _generate_examples(self, data_dirs, json_paths, metadata_path):
         # This method handles input defined in _split_generators to yield
         # (key, example) tuples from the dataset.
         # The `key` is for legacy reasons (tfds) and is not important in itself,
         # but must be unique for each example.
 
         # Load the metadata parquet file if the config is text_only
-        if self.config.name == "text_only":
-            metadata_df = pd.read_parquet(data_dirs[0])
+        if "text_only" in self.config.name:
+            metadata_df = pd.read_parquet(metadata_path)
             for _, row in metadata_df.iterrows():
                 yield row["image_name"], {
                     "image_name": row["image_name"],
@@ -233,13 +341,31 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
                     "step": row["step"],
                     "cfg": row["cfg"],
                     "sampler": _SAMPLER_DICT[int(row["sampler"])],
+                    "width": row["width"],
+                    "height": row["height"],
+                    "user_name": row["user_name"],
+                    "timestamp": row["timestamp"],
+                    "image_nsfw": row["image_nsfw"],
+                    "prompt_nsfw": row["prompt_nsfw"],
                 }
 
         else:
-            # Iterate through all extracted zip folders for images
             num_data_dirs = len(data_dirs)
             assert num_data_dirs == len(json_paths)
 
+            # Read the metadata table (only rows with the needed part_ids)
+            part_ids = []
+            for path in json_paths:
+                cur_id = int(re.sub(r"part-(\d+)\.json", r"\1", basename(path)))
+                part_ids.append(cur_id)
+
+            metadata_table = pq.read_table(
+                metadata_path,
+                filters=[("part_id", "in", part_ids)],
+            )
+            print(metadata_table.shape)
+
+            # Iterate through all extracted zip folders for images
             for k in range(num_data_dirs):
                 cur_data_dir = data_dirs[k]
                 cur_json_path = json_paths[k]
@@ -250,6 +376,12 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
                     img_params = json_data[img_name]
                     img_path = join(cur_data_dir, img_name)
 
+                    # Query the meta data
+                    row_mask = pa.compute.equal(
+                        metadata_table.column("image_name"), img_name
+                    )
+                    query_result = metadata_table.filter(row_mask)
+
                     # Yields examples as (key, example) tuples
                     yield img_name, {
                         "image": {
@@ -261,4 +393,10 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
                         "step": int(img_params["st"]),
                         "cfg": float(img_params["c"]),
                         "sampler": img_params["sa"],
+                        "width": query_result["width"][0].as_py(),
+                        "height": query_result["height"][0].as_py(),
+                        "user_name": query_result["user_name"][0].as_py(),
+                        "timestamp": query_result["timestamp"][0].as_py(),
+                        "image_nsfw": query_result["image_nsfw"][0].as_py(),
+                        "prompt_nsfw": query_result["prompt_nsfw"][0].as_py(),
                     }
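
As a standalone illustration of the metadata lookup pattern added to `_generate_examples` above, here is a minimal sketch; the parquet path, part ids, and image name are hypothetical placeholders, and `pyarrow.compute` is imported explicitly rather than accessed as `pa.compute`:

```python
import pyarrow.compute as pc
import pyarrow.parquet as pq

# Read only the rows whose part_id we actually need (hypothetical part ids);
# in the loader, this path comes from dl_manager.download(urls["metadata"]).
metadata_table = pq.read_table(
    "metadata.parquet",
    filters=[("part_id", "in", [1, 2])],
)

# Look up one image's metadata row by name, as the loader does per image
row_mask = pc.equal(metadata_table.column("image_name"), "example-image.png")
query_result = metadata_table.filter(row_mask)

if query_result.num_rows > 0:
    print(query_result["width"][0].as_py(), query_result["height"][0].as_py())
```

Note that each per-image `filter` call scans the whole table, which is presumably acceptable here only because the table has already been narrowed to the selected part_ids at read time.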