# Copyright 2022 Jay Wang, Evan Montoya, David Munechika, Alex Yang, Ben Hoover, Polo Chau
# MIT License
"""Loading script for DiffusionDB."""
import re
import numpy as np
import pandas as pd
from json import load
from os.path import join, basename
from huggingface_hub import hf_hub_url
import datasets
# Citation for the DiffusionDB paper (see arXiv or the dataset repo/website)
_CITATION = """\
@article{wangDiffusionDBLargescalePrompt2022,
title = {{{DiffusionDB}}: {{A}} Large-Scale Prompt Gallery Dataset for Text-to-Image Generative Models},
author = {Wang, Zijie J. and Montoya, Evan and Munechika, David and Yang, Haoyang and Hoover, Benjamin and Chau, Duen Horng},
year = {2022},
journal = {arXiv:2210.14896 [cs]},
url = {https://arxiv.org/abs/2210.14896}
}
"""
# Official description of the dataset
_DESCRIPTION = """
DiffusionDB is the first large-scale text-to-image prompt dataset. It contains 2
million images generated by Stable Diffusion using prompts and hyperparameters
specified by real users. The unprecedented scale and diversity of this
human-actuated dataset provide exciting research opportunities in understanding
the interplay between prompts and generative models, detecting deepfakes, and
designing human-AI interaction tools to help users more easily use these models.
"""
_HOMEPAGE = "https://poloclub.github.io/diffusiondb"
_LICENSE = "CC0 1.0"
_VERSION = datasets.Version("0.9.1")
# Programmatically generate the URLs for different parts
# hf_hub_url() provides a more flexible way to resolve the file URLs
# https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-000001.zip
_URLS = {}
_URLS_LARGE = {}
_PART_IDS = range(1, 2001)
_PART_IDS_LARGE = range(1, 14001)
for i in _PART_IDS:
_URLS[i] = hf_hub_url(
"poloclub/diffusiondb",
filename=f"images/part-{i:06}.zip",
repo_type="dataset",
)
for i in _PART_IDS_LARGE:
if i < 10001:
_URLS_LARGE[i] = hf_hub_url(
"poloclub/diffusiondb",
filename=f"diffusiondb-large-part-1/part-{i:06}.zip",
repo_type="dataset",
)
else:
_URLS_LARGE[i] = hf_hub_url(
"poloclub/diffusiondb",
filename=f"diffusiondb-large-part-2/part-{i:06}.zip",
repo_type="dataset",
)
# Add the metadata parquet URL as well
_URLS["metadata"] = hf_hub_url(
"poloclub/diffusiondb", filename="metadata.parquet", repo_type="dataset"
)
_URLS_LARGE["metadata"] = hf_hub_url(
"poloclub/diffusiondb",
filename="metadata-large.parquet",
repo_type="dataset",
)
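# For example, part id 42 resolves to "images/part-000042.zip" in the 2M
# subset and to "diffusiondb-large-part-1/part-000042.zip" in the large
# subset (ids 10001 and above live under diffusiondb-large-part-2).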
_SAMPLER_DICT = {
1: "ddim",
2: "plms",
3: "k_euler",
4: "k_euler_ancestral",
5: "ddik_heunm",
6: "k_dpm_2",
7: "k_dpm_2_ancestral",
8: "k_lms",
9: "others",
}
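# The metadata table stores the sampler as an integer code; _generate_examples
# below maps it back to its string name, e.g. _SAMPLER_DICT[8] == "k_lms".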
class DiffusionDBConfig(datasets.BuilderConfig):
"""BuilderConfig for DiffusionDB."""
def __init__(self, part_ids, is_large, **kwargs):
"""BuilderConfig for DiffusionDB.
Args:
part_ids([int]): A list of part_ids.
            is_large(bool): Whether to download from DiffusionDB Large (14 million images).
**kwargs: keyword arguments forwarded to super.
"""
super(DiffusionDBConfig, self).__init__(version=_VERSION, **kwargs)
self.part_ids = part_ids
self.is_large = is_large
class DiffusionDB(datasets.GeneratorBasedBuilder):
"""A large-scale text-to-image prompt gallery dataset based on Stable Diffusion."""
BUILDER_CONFIGS = []
    # Programmatically generate configuration options (HF requires using a
    # string as the config key)
for num_k in [1, 5, 10, 50, 100, 500, 1000]:
for sampling in ["first", "random"]:
for is_large in [False, True]:
num_k_str = f"{num_k}k" if num_k < 1000 else f"{num_k // 1000}m"
subset_str = "large_" if is_large else "2m_"
if sampling == "random":
# Name the config
cur_name = subset_str + "random_" + num_k_str
# Add a short description for each config
cur_description = (
f"Random {num_k_str} images with their prompts and parameters"
)
# Sample part_ids
total_part_ids = _PART_IDS_LARGE if is_large else _PART_IDS
part_ids = np.random.choice(
total_part_ids, num_k, replace=False
).tolist()
else:
# Name the config
cur_name = subset_str + "first_" + num_k_str
# Add a short description for each config
cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters"
# Sample part_ids
total_part_ids = _PART_IDS_LARGE if is_large else _PART_IDS
part_ids = total_part_ids[1 : num_k + 1]
# Create configs
BUILDER_CONFIGS.append(
DiffusionDBConfig(
name=cur_name,
part_ids=part_ids,
is_large=is_large,
description=cur_description,
),
)
    # Add a few more options for Large only
for num_k in [5000, 10000]:
for sampling in ["first", "random"]:
num_k_str = f"{num_k // 1000}m"
subset_str = "large_"
if sampling == "random":
# Name the config
cur_name = subset_str + "random_" + num_k_str
# Add a short description for each config
cur_description = (
f"Random {num_k_str} images with their prompts and parameters"
)
# Sample part_ids
total_part_ids = _PART_IDS_LARGE
part_ids = np.random.choice(
total_part_ids, num_k, replace=False
).tolist()
else:
# Name the config
cur_name = subset_str + "first_" + num_k_str
# Add a short description for each config
cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters"
# Sample part_ids
total_part_ids = _PART_IDS_LARGE
part_ids = total_part_ids[1 : num_k + 1]
# Create configs
BUILDER_CONFIGS.append(
DiffusionDBConfig(
name=cur_name,
part_ids=part_ids,
is_large=True,
description=cur_description,
),
)
# Need to manually add all (2m) and all (large)
BUILDER_CONFIGS.append(
DiffusionDBConfig(
name="2m_all",
part_ids=_PART_IDS,
is_large=False,
description="All images with their prompts and parameters",
),
)
BUILDER_CONFIGS.append(
DiffusionDBConfig(
name="large_all",
part_ids=_PART_IDS_LARGE,
is_large=True,
description="All images with their prompts and parameters",
),
)
    # We also provide a text-only option, which loads the metadata parquet file
BUILDER_CONFIGS.append(
DiffusionDBConfig(
name="2m_text_only",
part_ids=[],
is_large=False,
description="Only include all prompts and parameters (no image)",
),
)
BUILDER_CONFIGS.append(
DiffusionDBConfig(
name="large_text_only",
part_ids=[],
is_large=True,
description="Only include all prompts and parameters (no image)",
),
)
    # Add a random 1k sample from 2M as the first entry point shown on the HF data viewer
# Sample part_ids
part_ids = np.random.choice(_PART_IDS, 1000, replace=False).tolist()
BUILDER_CONFIGS.append(
DiffusionDBConfig(
name="1k_random_2m",
part_ids=part_ids,
is_large=False,
description="Another random 1k images with meta data from DiffusionDB 2M",
),
)
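    # Altogether, the generated names follow the pattern
    # "<subset>_<first|random>_<size>", e.g. "2m_first_1k", "2m_random_5k",
    # "large_random_100k", and "2m_first_1m", plus the manual entries
    # "2m_all", "large_all", "2m_text_only", "large_text_only", and
    # "1k_random_2m".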
    # Default to loading only 1k random images
    DEFAULT_CONFIG_NAME = "2m_random_1k"
def _info(self):
"""Specify the information of DiffusionDB."""
if "text_only" in self.config.name:
features = datasets.Features(
{
"image_name": datasets.Value("string"),
"prompt": datasets.Value("string"),
"part_id": datasets.Value("uint16"),
"seed": datasets.Value("uint32"),
"step": datasets.Value("uint16"),
"cfg": datasets.Value("float32"),
"sampler": datasets.Value("string"),
"width": datasets.Value("uint16"),
"height": datasets.Value("uint16"),
"user_name": datasets.Value("string"),
"timestamp": datasets.Value("timestamp[us, tz=UTC]"),
"image_nsfw": datasets.Value("float32"),
"prompt_nsfw": datasets.Value("float32"),
},
)
else:
features = datasets.Features(
{
"image": datasets.Image(),
"prompt": datasets.Value("string"),
"seed": datasets.Value("uint32"),
"step": datasets.Value("uint16"),
"cfg": datasets.Value("float32"),
"sampler": datasets.Value("string"),
"width": datasets.Value("uint16"),
"height": datasets.Value("uint16"),
"user_name": datasets.Value("string"),
"timestamp": datasets.Value("timestamp[us, tz=UTC]"),
"image_nsfw": datasets.Value("float32"),
"prompt_nsfw": datasets.Value("float32"),
},
)
return datasets.DatasetInfo(
description=_DESCRIPTION,
features=features,
supervised_keys=None,
homepage=_HOMEPAGE,
license=_LICENSE,
citation=_CITATION,
)
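    # Note: for the image configs, the "image" feature is decoded by the
    # `datasets` library (e.g., to a PIL image) when rows are accessed; the
    # text_only configs additionally expose "image_name" and "part_id" so
    # rows can be traced back to their zip part.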
def _split_generators(self, dl_manager):
# If several configurations are possible (listed in BUILDER_CONFIGS),
# the configuration selected by the user is in self.config.name
        # dl_manager is a datasets.download.DownloadManager that can be used
        # to download and extract URLs. It accepts any type or nested
        # list/dict and returns the same structure with each URL replaced by a
        # path to the local file. By default, archives are extracted and the
        # path to a cached folder containing the extracted files is returned
        # instead of the archive itself.
# Download and extract zip files of all sampled part_ids
data_dirs = []
json_paths = []
# Resolve the urls
if self.config.is_large:
urls = _URLS_LARGE
else:
urls = _URLS
for cur_part_id in self.config.part_ids:
cur_url = urls[cur_part_id]
data_dir = dl_manager.download_and_extract(cur_url)
data_dirs.append(data_dir)
json_paths.append(join(data_dir, f"part-{cur_part_id:06}.json"))
# Also download the metadata table
metadata_path = dl_manager.download(urls["metadata"])
return [
datasets.SplitGenerator(
name=datasets.Split.TRAIN,
# These kwargs will be passed to _generate_examples
gen_kwargs={
"data_dirs": data_dirs,
"json_paths": json_paths,
"metadata_path": metadata_path,
},
),
]
def _generate_examples(self, data_dirs, json_paths, metadata_path):
# This method handles input defined in _split_generators to yield
# (key, example) tuples from the dataset.
# The `key` is for legacy reasons (tfds) and is not important in itself,
# but must be unique for each example.
# Load the metadata parquet file if the config is text_only
if "text_only" in self.config.name:
metadata_df = pd.read_parquet(metadata_path)
for _, row in metadata_df.iterrows():
yield row["image_name"], {
"image_name": row["image_name"],
"prompt": row["prompt"],
"part_id": row["part_id"],
"seed": row["seed"],
"step": row["step"],
"cfg": row["cfg"],
"sampler": _SAMPLER_DICT[int(row["sampler"])],
"width": row["width"],
"height": row["height"],
"user_name": row["user_name"],
"timestamp": None
if pd.isnull(row["timestamp"])
else row["timestamp"],
"image_nsfw": row["image_nsfw"],
"prompt_nsfw": row["prompt_nsfw"],
}
else:
num_data_dirs = len(data_dirs)
assert num_data_dirs == len(json_paths)
# Read the metadata table (only rows with the needed part_ids)
part_ids = []
for path in json_paths:
cur_id = int(re.sub(r"part-(\d+)\.json", r"\1", basename(path)))
part_ids.append(cur_id)
# We have to use pandas here to make the dataset preview work (it
# uses streaming mode)
metadata_table = pd.read_parquet(
metadata_path,
filters=[("part_id", "in", part_ids)],
)
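            # The `filters` argument above is pushed down to the parquet
            # reader, so only rows belonging to the requested part_ids are
            # loaded into memory.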
# Iterate through all extracted zip folders for images
for k in range(num_data_dirs):
cur_data_dir = data_dirs[k]
cur_json_path = json_paths[k]
                with open(cur_json_path, "r", encoding="utf8") as json_file:
                    json_data = load(json_file)
for img_name in json_data:
img_params = json_data[img_name]
img_path = join(cur_data_dir, img_name)
# Query the metadata
query_result = metadata_table.query(f'`image_name` == "{img_name}"')
# Yields examples as (key, example) tuples
yield img_name, {
"image": {
"path": img_path,
"bytes": open(img_path, "rb").read(),
},
"prompt": img_params["p"],
"seed": int(img_params["se"]),
"step": int(img_params["st"]),
"cfg": float(img_params["c"]),
"sampler": img_params["sa"],
"width": query_result["width"].to_list()[0],
"height": query_result["height"].to_list()[0],
"user_name": query_result["user_name"].to_list()[0],
"timestamp": None
if pd.isnull(query_result["timestamp"].to_list()[0])
else query_result["timestamp"].to_list()[0],
"image_nsfw": query_result["image_nsfw"].to_list()[0],
"prompt_nsfw": query_result["prompt_nsfw"].to_list()[0],
}
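

if __name__ == "__main__":
    # A minimal local sanity check (a sketch; not used by the `datasets`
    # loading machinery): report how many builder configs were generated
    # and which one is the default.
    print(f"Generated {len(DiffusionDB.BUILDER_CONFIGS)} builder configs")
    print("Default config:", DiffusionDB.DEFAULT_CONFIG_NAME)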