xujz0703 committed on
Commit dd79feb
1 Parent(s): 3dd2625

Update ImageRewardDB.py

Files changed (1)
  1. ImageRewardDB.py +134 -33
ImageRewardDB.py CHANGED
@@ -43,14 +43,14 @@ To build the ImageRewadDB, we design a pipeline tailored for it, establishing cr
 annotator training, optimizing labeling experience, and ensuring quality validation. \
 """

-_HOMEPAGE = "https://huggingface.co/datasets/wuyuchen/ImageRewardDB"
+_HOMEPAGE = "https://huggingface.co/datasets/THUDM/ImageRewardDB"
 _VERSION = datasets.Version("1.0.0")

 _LICENSE = "Apache License 2.0"

 # The HuggingFace Datasets library doesn't host the datasets but only points to the original files.
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
-_REPO_ID = "wuyuchen/ImageRewardDB"
+_REPO_ID = "THUDM/ImageRewardDB"
 _URLS = {}
 _PART_IDS = {
     "train": 32,
@@ -107,26 +107,56 @@ class ImageRewardDB(datasets.GeneratorBasedBuilder):
             "validation": 2,
             "test": 2
         }
+        BUILDER_CONFIGS.append(
+            ImageRewardDBConfig(name=f"{num_k}k_group", part_ids=part_ids, description=f"This is a {num_k}k-scale group version of ImageRewardDB")
+        )
         BUILDER_CONFIGS.append(
             ImageRewardDBConfig(name=f"{num_k}k", part_ids=part_ids, description=f"This is a {num_k}k-scale ImageRewardDB")
         )
+        BUILDER_CONFIGS.append(
+            ImageRewardDBConfig(name=f"{num_k}k_pair", part_ids=part_ids, description=f"This is a {num_k}k-scale pair version of ImageRewardDB")
+        )

     DEFAULT_CONFIG_NAME = "8k"  # It's not mandatory to have a default configuration. Just use one if it makes sense.

     def _info(self):
-        features = datasets.Features(
-            {
-                "image": datasets.Image(),
-                "prompt_id": datasets.Value("string"),
-                "prompt": datasets.Value("string"),
-                "classification": datasets.Value("string"),
-                "image_amount_in_total": datasets.Value("int8"),
-                "rank": datasets.Value("int8"),
-                "overall_rating": datasets.Value("int8"),
-                "image_text_alignment_rating": datasets.Value("int8"),
-                "fidelity_rating": datasets.Value("int8")
-            }
-        )
+        if "group" in self.config.name:
+            features = datasets.Features(
+                {
+                    "prompt_id": datasets.Value("string"),
+                    "prompt": datasets.Value("string"),
+                    "classification": datasets.Value("string"),
+                    "image": datasets.Sequence(datasets.Image()),
+                    "rank": datasets.Sequence(datasets.Value("int8")),
+                    "overall_rating": datasets.Sequence(datasets.Value("int8")),
+                    "image_text_alignment_rating": datasets.Sequence(datasets.Value("int8")),
+                    "fidelity_rating": datasets.Sequence(datasets.Value("int8"))
+                }
+            )
+        elif "pair" in self.config.name:
+            features = datasets.Features(
+                {
+                    "prompt_id": datasets.Value("string"),
+                    "prompt": datasets.Value("string"),
+                    "classification": datasets.Value("string"),
+                    "img_better": datasets.Image(),
+                    "img_worse": datasets.Image()
+                }
+            )
+        else:
+            features = datasets.Features(
+                {
+                    "image": datasets.Image(),
+                    "prompt_id": datasets.Value("string"),
+                    "prompt": datasets.Value("string"),
+                    "classification": datasets.Value("string"),
+                    "image_amount_in_total": datasets.Value("int8"),
+                    "rank": datasets.Value("int8"),
+                    "overall_rating": datasets.Value("int8"),
+                    "image_text_alignment_rating": datasets.Value("int8"),
+                    "fidelity_rating": datasets.Value("int8")
+                }
+            )
         return datasets.DatasetInfo(
             # This is the description that will appear on the datasets page.
             description=_DESCRIPTION,
@@ -199,22 +229,93 @@ class ImageRewardDB(datasets.GeneratorBasedBuilder):
         assert num_data_dirs == len(json_paths)

         # Iterate through all extracted zip folders for images
-        metadata_table = pd.read_parquet(metadata_path)
+        # metadata_table = pd.read_parquet(metadata_path)
         for index, json_path in enumerate(json_paths):
-            json_data = json.load(open(json_path, "r", encoding="utf-8"))
-            for example in json_data:
-                image_path = os.path.join(data_dirs[index], str(example["image_path"]).split("/")[-1])
-                yield example["image_path"], {
-                    "image": {
-                        "path": image_path,
-                        "bytes": open(image_path, "rb").read()
-                    },
-                    "prompt_id": example["prompt_id"],
-                    "prompt": example["prompt"],
-                    "classification": example["classification"],
-                    "image_amount_in_total": example["image_amount_in_total"],
-                    "rank": example["rank"],
-                    "overall_rating": example["overall_rating"],
-                    "image_text_alignment_rating": example["image_text_alignment_rating"],
-                    "fidelity_rating": example["fidelity_rating"]
-                }
+            json_data = json.load(open(json_path, "r", encoding="utf-8"))
+            if "group" in self.config.name or "pair" in self.config.name:
+                group_num = 0
+                image_path = []
+                rank = []
+                overall_rating, image_text_alignment_rating, fidelity_rating = [], [], []
+                for sample in json_data:
+                    if group_num == 0:
+                        image_path.clear()
+                        rank.clear()
+                        overall_rating.clear()
+                        image_text_alignment_rating.clear()
+                        fidelity_rating.clear()
+                        prompt_id = sample["prompt_id"]
+                        prompt = sample["prompt"]
+                        classification = sample["classification"]
+                        image_amount_in_total = sample["image_amount_in_total"]
+                    # image_path.append(sample["image_path"])
+                    image_path.append(os.path.join(data_dirs[index], str(sample["image_path"]).split("/")[-1]))
+                    rank.append(sample["rank"])
+                    overall_rating.append(sample["overall_rating"])
+                    image_text_alignment_rating.append(sample["image_text_alignment_rating"])
+                    fidelity_rating.append(sample["fidelity_rating"])
+                    group_num += 1
+                    if group_num == image_amount_in_total:
+                        group_num = 0
+                        if "group" in self.config.name:
+                            yield prompt_id, ({
+                                "prompt_id": prompt_id,
+                                "prompt": prompt,
+                                "classification": classification,
+                                "image": [{
+                                    "path": image_path[idx],
+                                    "bytes": open(image_path[idx], "rb").read()
+                                } for idx in range(image_amount_in_total)],
+                                "rank": rank,
+                                "overall_rating": overall_rating,
+                                "image_text_alignment_rating": image_text_alignment_rating,
+                                "fidelity_rating": fidelity_rating,
+                            })
+                        else:
+                            for idx in range(image_amount_in_total):
+                                for idy in range(idx + 1, image_amount_in_total):
+                                    if rank[idx] < rank[idy]:
+                                        yield prompt_id, ({
+                                            "prompt_id": prompt_id,
+                                            "prompt": prompt,
+                                            "classification": classification,
+                                            "img_better": {
+                                                "path": image_path[idx],
+                                                "bytes": open(image_path[idx], "rb").read()
+                                            },
+                                            "img_worse": {
+                                                "path": image_path[idy],
+                                                "bytes": open(image_path[idy], "rb").read()
+                                            }
+                                        })
+                                    elif rank[idx] > rank[idy]:
+                                        yield prompt_id, ({
+                                            "prompt_id": prompt_id,
+                                            "prompt": prompt,
+                                            "classification": classification,
+                                            "img_better": {
+                                                "path": image_path[idy],
+                                                "bytes": open(image_path[idy], "rb").read()
+                                            },
+                                            "img_worse": {
+                                                "path": image_path[idx],
+                                                "bytes": open(image_path[idx], "rb").read()
+                                            }
+                                        })
+            else:
+                for example in json_data:
+                    image_path = os.path.join(data_dirs[index], str(example["image_path"]).split("/")[-1])
+                    yield example["image_path"], {
+                        "image": {
+                            "path": image_path,
+                            "bytes": open(image_path, "rb").read()
+                        },
+                        "prompt_id": example["prompt_id"],
+                        "prompt": example["prompt"],
+                        "classification": example["classification"],
+                        "image_amount_in_total": example["image_amount_in_total"],
+                        "rank": example["rank"],
+                        "overall_rating": example["overall_rating"],
+                        "image_text_alignment_rating": example["image_text_alignment_rating"],
+                        "fidelity_rating": example["fidelity_rating"]
+                    }
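
The change set above gives the same annotations three selectable views: per-image rows (the original schema), per-prompt groups, and oriented preference pairs. A minimal loading sketch follows; the available config names come from the `num_k` loop that this diff only partially shows, so the "8k", "8k_group", and "8k_pair" names below (built from the DEFAULT_CONFIG_NAME scale) are an assumption:

import datasets

# Per-image view (original schema): one row per generated image.
per_image = datasets.load_dataset("THUDM/ImageRewardDB", "8k", split="train")

# Group view: one row per prompt; "image", "rank", and the three rating
# columns are index-aligned sequences over that prompt's images.
groups = datasets.load_dataset("THUDM/ImageRewardDB", "8k_group", split="train")

# Pair view: one row per oriented comparison of two images for the same
# prompt, exposed as "img_better" / "img_worse".
pairs = datasets.load_dataset("THUDM/ImageRewardDB", "8k_pair", split="train")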
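The pair branch in _generate_examples orients every two images within a group by rank, where a smaller rank value marks the better image, and silently skips ties, so a group of n images yields at most n*(n-1)/2 pairs. A self-contained sketch of that rule, using itertools.combinations in place of the explicit idx/idy loops and hypothetical file names:

from itertools import combinations

def ranked_pairs(image_paths, ranks):
    # Mirror of the diff's pair logic: lower rank wins, equal ranks are skipped.
    for idx, idy in combinations(range(len(image_paths)), 2):
        if ranks[idx] < ranks[idy]:
            yield image_paths[idx], image_paths[idy]  # idx is the better image
        elif ranks[idx] > ranks[idy]:
            yield image_paths[idy], image_paths[idx]  # idy is the better image

# Hypothetical group of three images where "b.jpg" is ranked best and the
# other two are tied: the tie contributes no pair.
print(list(ranked_pairs(["a.jpg", "b.jpg", "c.jpg"], [2, 1, 2])))
# [('b.jpg', 'a.jpg'), ('b.jpg', 'c.jpg')]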
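Continuing the loading sketch above (same assumed config names), the group schema keeps per-image annotations aligned by index, which makes per-prompt lookups one-liners. The images decode to PIL objects, the default behavior of the datasets.Image() feature:

import datasets

groups = datasets.load_dataset("THUDM/ImageRewardDB", "8k_group", split="train")
row = groups[0]

# Sequence columns are index-aligned, matching the group features in _info().
assert len(row["image"]) == len(row["rank"]) == len(row["overall_rating"])

# Lower rank = better, the same convention the pair branch tests with
# rank[idx] < rank[idy].
best = row["rank"].index(min(row["rank"]))
print(row["prompt"], row["rank"], row["image"][best].size)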