Datasets: poloclub/diffusiondb
Languages: English
Multilinguality: multilingual
Size Categories: n>1T
Language Creators: found
Annotations Creators: no-annotation
Source Datasets: original
ArXiv: 2210.14896
License: cc0-1.0
xiaohk committed
Commit c919e37 (parent: 156c8f9)

Add a loading script

Files changed (1):
  1. diffusiondb.py (+187, -0)

diffusiondb.py ADDED
@@ -0,0 +1,187 @@
# Copyright 2022 Jay Wang, Evan Montoya, David Munechika, Alex Yang, Ben Hoover, Polo Chau
# MIT License
"""Loading script for DiffusionDB."""

import numpy as np
from json import load
from os.path import join

import datasets

# Citation for DiffusionDB (also listed on arXiv and the dataset repo/website)
_CITATION = """\
@article{wangDiffusionDBLargescalePrompt2022,
  title = {{{DiffusionDB}}: {{A}} Large-Scale Prompt Gallery Dataset for Text-to-Image Generative Models},
  author = {Wang, Zijie J. and Montoya, Evan and Munechika, David and Yang, Haoyang and Hoover, Benjamin and Chau, Duen Horng},
  year = {2022},
  journal = {arXiv:2210.14896 [cs]},
  url = {https://arxiv.org/abs/2210.14896}
}
"""

# Official dataset description
_DESCRIPTION = """
DiffusionDB is the first large-scale text-to-image prompt dataset. It contains 2
million images generated by Stable Diffusion using prompts and hyperparameters
specified by real users. The unprecedented scale and diversity of this
human-actuated dataset provide exciting research opportunities in understanding
the interplay between prompts and generative models, detecting deepfakes, and
designing human-AI interaction tools to help users more easily use these models.
"""

_HOMEPAGE = "https://poloclub.github.io/diffusiondb"
_LICENSE = "CC0 1.0"
_VERSION = datasets.Version("0.9.0")

# Programmatically generate the URLs for all 2,000 image parts, e.g.,
# https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-000001.zip
_URLS = {}
_PART_IDS = range(1, 2001)

for i in _PART_IDS:
    _URLS[i] = f"https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-{i:06}.zip"
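
# Each part archive is expected to hold 1,000 images plus a JSON file that
# maps each image filename to its prompt and hyperparameters (2,000 parts of
# 1,000 images each make up the 2 million images described above).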

class DiffusionDBConfig(datasets.BuilderConfig):
    """BuilderConfig for DiffusionDB."""

    def __init__(self, part_ids, **kwargs):
        """BuilderConfig for DiffusionDB.

        Args:
            part_ids ([int]): A list of part ids to download.
            **kwargs: Keyword arguments forwarded to super.
        """
        super(DiffusionDBConfig, self).__init__(version=_VERSION, **kwargs)
        self.part_ids = part_ids


class DiffusionDB(datasets.GeneratorBasedBuilder):
    """A large-scale text-to-image prompt gallery dataset based on Stable Diffusion."""

    BUILDER_CONFIGS = []

    # Programmatically generate the configuration options (HF requires config
    # names to be strings)
    for num_k in [1, 5, 10, 50, 100, 500, 1000, 2000]:
        for sampling in ["first", "random"]:
            num_k_str = f"{num_k}k" if num_k < 1000 else f"{num_k // 1000}m"

            if sampling == "random":
                # Name the config
                cur_name = "random_" + num_k_str

                # Add a short description for each config
                cur_description = (
                    f"Random {num_k_str} images with their prompts and parameters"
                )

                # Sample part_ids without replacement; note that the sampling
                # happens at import time, so the chosen parts vary between
                # sessions
                part_ids = np.random.choice(_PART_IDS, num_k, replace=False).tolist()
            else:
                # Name the config
                cur_name = "first_" + num_k_str

                # Add a short description for each config
                cur_description = f"The first {num_k_str} images in this dataset with their prompts and parameters"

                # Take the first num_k parts (part ids 1 through num_k)
                part_ids = _PART_IDS[:num_k]

            # Create the config
            BUILDER_CONFIGS.append(
                DiffusionDBConfig(
                    name=cur_name,
                    part_ids=part_ids,
                    description=cur_description,
                ),
            )
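
    # The nested loops above produce 16 configs: first_1k, random_1k, first_5k,
    # random_5k, first_10k, random_10k, first_50k, random_50k, first_100k,
    # random_100k, first_500k, random_500k, first_1m, random_1m, first_2m, and
    # random_2m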

    # Default to only load 1k random images
    DEFAULT_CONFIG_NAME = "random_1k"

    def _info(self):
        """Specify the information of DiffusionDB."""

        features = datasets.Features(
            {
                "image": datasets.Image(),
                "prompt": datasets.Value("string"),
                # Random seed used to generate the image
                "seed": datasets.Value("int64"),
                # Number of denoising steps
                "step": datasets.Value("int64"),
                # Classifier-free guidance scale
                "cfg": datasets.Value("float32"),
                # Name of the sampling method
                "sampler": datasets.Value("string"),
            },
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )
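
    # A loaded record then looks like this (all values below are hypothetical):
    # {
    #     "image": <PIL.Image.Image>,  # decoded from the stored bytes
    #     "prompt": "an astronaut riding a horse on mars, digital art",
    #     "seed": 42,
    #     "step": 50,
    #     "cfg": 7.0,
    #     "sampler": "k_lms",
    # }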

    def _split_generators(self, dl_manager):
        # If several configurations are possible (listed in BUILDER_CONFIGS),
        # the configuration selected by the user is in self.config.name

        # dl_manager is a datasets.download.DownloadManager. It can be used to
        # download and extract URLs: it accepts any type or nested list/dict
        # and gives back the same structure with each URL replaced by a path
        # to a local file. By default, an archive is extracted and the path to
        # the cached folder where it was extracted is returned instead of the
        # archive itself.

        # Download and extract the zip files of all sampled part_ids
        data_dirs = []
        json_paths = []

        for cur_part_id in self.config.part_ids:
            cur_url = _URLS[cur_part_id]
            data_dir = dl_manager.download_and_extract(cur_url)

            data_dirs.append(data_dir)
            json_paths.append(join(data_dir, f"part-{cur_part_id:06}.json"))

        # DiffusionDB has no canonical validation/test split, so everything is
        # exposed as a single train split
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "data_dirs": data_dirs,
                    "json_paths": json_paths,
                },
            ),
        ]

    def _generate_examples(self, data_dirs, json_paths):
        # This method takes the input defined in _split_generators and yields
        # (key, example) tuples from the dataset. The `key` exists for legacy
        # reasons (tfds) and is not important in itself, but it must be unique
        # for each example.

        # Iterate through all extracted zip folders
        num_data_dirs = len(data_dirs)
        assert num_data_dirs == len(json_paths)

        for k in range(num_data_dirs):
            cur_data_dir = data_dirs[k]
            cur_json_path = json_paths[k]

            with open(cur_json_path, "r", encoding="utf8") as f:
                json_data = load(f)

            for img_name in json_data:
                img_params = json_data[img_name]
                img_path = join(cur_data_dir, img_name)

                with open(img_path, "rb") as f:
                    img_bytes = f.read()

                # Yield examples as (key, example) tuples; image filenames are
                # unique, so they double as keys
                yield img_name, {
                    "image": {"path": img_path, "bytes": img_bytes},
                    "prompt": img_params["p"],
                    "seed": int(img_params["se"]),
                    "step": int(img_params["st"]),
                    "cfg": float(img_params["c"]),
                    "sampler": img_params["sa"],
                }
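
With this script merged, a subset can be loaded by passing one of the config names defined above to load_dataset. A minimal sketch (the accessed fields follow the features declared in _info):

    from datasets import load_dataset

    # Load 1,000 randomly sampled images with their prompts and parameters
    # (this is DEFAULT_CONFIG_NAME, so the config argument could be omitted)
    dataset = load_dataset("poloclub/diffusiondb", "random_1k")

    # Or deterministically load the first 5,000 images:
    # dataset = load_dataset("poloclub/diffusiondb", "first_5k")

    # The script defines a single train split
    print(dataset["train"][0]["prompt"])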