ivelin committed
Commit ce1b4f4
Parent(s): 5666321

fix: checkpoint


Signed-off-by: ivelin <ivelin.eth@gmail.com>

Files changed (1):
  ui_refexp.py (+65 -67)
ui_refexp.py CHANGED
@@ -67,40 +67,40 @@ _METADATA_URLS = {
 
 
 def tfrecord2dict(raw_tfr_dataset: None):
     """Filter and convert refexp tfrecord file to dict object."""
     count = 0
     donut_refexp_dict = []
     for raw_record in raw_tfr_dataset:
         count += 1
         example = tf.train.Example()
         example.ParseFromString(raw_record.numpy())
         # print(f"total UI objects in this sample: {len(example.features.feature['image/object/bbox/xmin'].float_list.value)}")
         # print(f"feature keys: {example.features.feature.keys}")
         donut_refexp = {}
         image_id = example.features.feature['image/id'].bytes_list.value[0].decode()
-        image_path=zipurl_template.format(image_id = image_id)
+        image_path = zipurl_template.format(image_id=image_id)
         donut_refexp["image_path"] = image_path
         donut_refexp["question"] = example.features.feature["image/ref_exp/text"].bytes_list.value[0].decode()
         object_idx = example.features.feature["image/ref_exp/label"].int64_list.value[0]
         object_idx = int(object_idx)
         # print(f"object_idx: {object_idx}")
         object_bb = {}
         # print(f"example.features.feature['image/object/bbox/xmin']: {example.features.feature['image/object/bbox/xmin'].float_list.value[object_idx]}")
         object_bb["xmin"] = example.features.feature['image/object/bbox/xmin'].float_list.value[object_idx]
         object_bb["ymin"] = example.features.feature['image/object/bbox/ymin'].float_list.value[object_idx]
         object_bb["xmax"] = example.features.feature['image/object/bbox/xmax'].float_list.value[object_idx]
         object_bb["ymax"] = example.features.feature['image/object/bbox/ymax'].float_list.value[object_idx]
         donut_refexp["answer"] = object_bb
         donut_refexp_dict.append(donut_refexp)
         if count != 3:
             continue
         print(f"Donut refexp: {donut_refexp}")
         # for key, feature in example.features.feature.items():
         #     if key in ['image/id', "image/ref_exp/text", "image/ref_exp/label", 'image/object/bbox/xmin', 'image/object/bbox/ymin', 'image/object/bbox/xmax', 'image/object/bbox/ymax']:
         #         print(key, feature)
 
     print(f"Total samples in the raw dataset: {count}")
     return donut_refexp_dict
 
 
 class UIRefExp(datasets.GeneratorBasedBuilder):
@@ -120,34 +120,35 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
     # data = datasets.load_dataset('my_dataset', 'first_domain')
     # data = datasets.load_dataset('my_dataset', 'second_domain')
     BUILDER_CONFIGS = [
         datasets.BuilderConfig(
             name="ui_refexp",
             version=VERSION,
             description="Contains 66k+ unique UI screens. For each UI, we present a screenshot (JPG file) and the text shown on the screen that was extracted using an OCR model.",
         )
         # ,
         # # datasets.BuilderConfig(
         # #     name="screenshots_captions_filtered",
         # #     version=VERSION,
         # #     description="Contains 25k unique UI screens. For each UI, we present a screenshot (JPG file) and the text shown on the screen that was extracted using an OCR model. Filtering was done as discussed in this paper: https://aclanthology.org/2020.acl-main.729.pdf",
         # # ),
-    # ]
+    ]
 
-    DEFAULT_CONFIG_NAME = "screenshots_captions_filtered"
+    DEFAULT_CONFIG_NAME = "ui_refexp"
 
     def _info(self):
         features = datasets.Features(
             {
                 "screenshot": datasets.Image(),
-                "prompt": datasets.Value("string"), # click the search button next to menu drawer at the top of the screen
-                "target_bounding_box": dict, # [xmin, ymin, xmax, ymax], normalized screen reference values between 0 and 1
+                # click the search button next to menu drawer at the top of the screen
+                "prompt": datasets.Value("string"),
+                # [xmin, ymin, xmax, ymax], normalized screen reference values between 0 and 1
+                "target_bounding_box": dict,
             }
         )
 
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=features,
-            supervised_keys=("screenshot","prompt", "target_bounding_box"),
             homepage=_HOMEPAGE,
             license=_LICENSE,
             citation=_CITATION,
@@ -161,51 +162,48 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
         # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
         # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
         # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
-        image_urls = _DATA_URLs[self.config.name]
-        image_archive = dl_manager.download(image_urls)
         # download and extract TFRecord labeling metadata
         local_tfrs = {}
-        for split, tfrecord_url in _METADATA_URLS:
+        for split, tfrecord_url in _METADATA_URLS[self.config.name].items():
             local_tfr_file = dl_manager.download(tfrecord_url)
             local_tfrs[split] = local_tfr_file
+        # download image files
+        image_urls = _DATA_URLs[self.config.name]
+        archive_path = dl_manager.download(image_urls)
 
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "root_dir": data_dir,
                     "metadata_file": local_tfrs["train"],
                     "images": dl_manager.iter_archive(archive_path),
                     "split": "train",
 
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "root_dir": data_dir,
                     "metadata_file": local_tfrs["validation"],
                     "images": dl_manager.iter_archive(archive_path),
                     "split": "validation",
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
                 # These kwargs will be passed to _generate_examples
                 gen_kwargs={
-                    "root_dir": data_dir,
                     "metadata_file": local_tfrs["test"],
                     "images": dl_manager.iter_archive(archive_path),
                     "split": "test",
                 },
             )
         ]
 
     def _generate_examples(
         self,
-        root_dir,
         metadata_file,
         images,
         split, # method parameters are unpacked from `gen_kwargs` as given in `_split_generators`
@@ -214,12 +212,12 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
         # This method handles input defined in _split_generators to yield (key, example) tuples from the dataset.
         # The `key` is here for legacy reason (tfds) and is not important in itself.
         # filter tfrecord and convert to json
 
         with open(metadata_path, encoding="utf-8") as f:
             files_to_keep = set(f.read().split("\n"))
         for file_path, file_obj in images:
             if file_path.startswith(_IMAGES_DIR):
-                if file_path[len(_IMAGES_DIR) : -len(".jpg")] in files_to_keep:
+                if file_path[len(_IMAGES_DIR): -len(".jpg")] in files_to_keep:
                     label = file_path.split("/")[2]
                     yield file_path, {
                         "image": {"path": file_path, "bytes": file_obj.read()},
 