Datasets: ivelin
ivelin committed
Commit ad1211b
1 Parent(s): c85c876
chore: checkpoint
Signed-off-by: ivelin <ivelin.eth@gmail.com>
- ui_refexp.py +21 -17
ui_refexp.py
CHANGED
@@ -19,7 +19,7 @@ import csv
 import glob
 import os
 import tensorflow as tf
-
+import re
 import datasets
 
 import numpy as np
@@ -52,7 +52,8 @@ _LICENSE = "CC BY 4.0"
 # The HuggingFace dataset library don't host the datasets but only point to the original files
 # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
 _DATA_URLs = {
-    "ui_refexp": "https://
+    "ui_refexp": "https://storage.googleapis.com/crowdstf-rico-uiuc-4540/rico_dataset_v0.1/unique_uis.tar.gz"
+    # "https://huggingface.co/datasets/ncoop57/rico_captions/resolve/main/captions_hierarchies_images_filtered.zip",
 }
 
 _METADATA_URLS = {
@@ -64,8 +65,10 @@ _METADATA_URLS = {
 }
 
 
-def tfrecord2dict(raw_tfr_dataset: None):
-    """Filter and convert refexp tfrecord file to dict object."""
+def tfrecord2list(tfr_file: None):
+    """Filter and convert refexp tfrecord file to a list of dict object.
+    Each sample in the list is a dict with the following keys: (image_id, prompt, target_bounding_box)"""
+    test_raw_dataset = tf.data.TFRecordDataset([tfr_file])
     count = 0
     donut_refexp_dict = []
     for raw_record in raw_tfr_dataset:
@@ -76,9 +79,8 @@ def tfrecord2dict(raw_tfr_dataset: None):
         # print(f"feature keys: {example.features.feature.keys}")
         donut_refexp = {}
         image_id = example.features.feature['image/id'].bytes_list.value[0].decode()
-
-        donut_refexp["
-        donut_refexp["question"] = example.features.feature["image/ref_exp/text"].bytes_list.value[0].decode()
+        donut_refexp["image_id"] = image_id
+        donut_refexp["prompt"] = example.features.feature["image/ref_exp/text"].bytes_list.value[0].decode()
         object_idx = example.features.feature["image/ref_exp/label"].int64_list.value[0]
         object_idx = int(object_idx)
         # print(f"object_idx: {object_idx}")
@@ -88,7 +90,7 @@ def tfrecord2dict(raw_tfr_dataset: None):
         object_bb["ymin"] = example.features.feature['image/object/bbox/ymin'].float_list.value[object_idx]
         object_bb["xmax"] = example.features.feature['image/object/bbox/xmax'].float_list.value[object_idx]
         object_bb["ymax"] = example.features.feature['image/object/bbox/ymax'].float_list.value[object_idx]
-        donut_refexp["
+        donut_refexp["target_bounding_box"] = object_bb
         donut_refexp_dict.append(donut_refexp)
         if count != 3:
             continue
@@ -211,13 +213,15 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
         # The `key` is here for legacy reason (tfds) and is not important in itself.
         # filter tfrecord and convert to json
 
-
-
+        metadata = tfrecord2list(metadata_file)
+        files_to_keep = set()
+        for sample in metadata:
+            files_to_keep.add(sample["image_id"])
         for file_path, file_obj in images:
-
-
-
-
-
-
-
+            image_id = file_path.search("(\d+).jpg").group(1)
+            if image_id and image_id in files_to_keep:
+                label = file_path.split("/")[2]
+                yield file_path, {
+                    "image": {"path": file_path, "bytes": file_obj.read()},
+                    "label": label,
+                }