ivelin committed on
Commit
fe450dd
1 Parent(s): 5d5ab1d

fix: update draft script

Browse files
Files changed (1) hide show
  1. ui_refexp.py +49 -18
ui_refexp.py CHANGED
@@ -12,7 +12,7 @@
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
- """TODO: Add a description here."""
16
 
17
 
18
  import csv
@@ -47,17 +47,27 @@ _HOMEPAGE = "https://github.com/google-research-datasets/uibert"
47
  # TODO: Add the licence for the dataset here if you can find it
48
  _LICENSE = "CC BY 4.0"
49
 
50
- # TODO: Add link to the official dataset URLs here
51
  # The HuggingFace dataset library doesn't host the datasets but only points to the original files
52
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
53
  _DATA_URLs = {
54
- "ui_refexp": "https://huggingface.co/datasets/ncoop57/rico_captions/resolve/main/captions_hierarchies_images.zip",
 
 
 
 
 
 
 
 
 
 
55
  }
56
 
57
 
58
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
59
  class UIRefExp(datasets.GeneratorBasedBuilder):
60
- """TODO: Short description of my dataset."""
61
 
62
  VERSION = datasets.Version("1.1.0")
63
 
@@ -72,12 +82,13 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
72
  # You will be able to load one or the other configurations in the following list with
73
  # data = datasets.load_dataset('my_dataset', 'first_domain')
74
  # data = datasets.load_dataset('my_dataset', 'second_domain')
75
- # BUILDER_CONFIGS = [
76
- # datasets.BuilderConfig(
77
- # name="ui_refexp",
78
- # version=VERSION,
79
- # description="Contains 66k+ unique UI screens. For each UI, we present a screenshot (JPG file) and the text shown on the screen that was extracted using an OCR model.",
80
- # ),
 
81
  # # datasets.BuilderConfig(
82
  # # name="screenshots_captions_filtered",
83
  # # version=VERSION,
@@ -85,22 +96,21 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
85
  # # ),
86
  # ]
87
 
88
- # DEFAULT_CONFIG_NAME = "screenshots_captions_filtered"
89
 
90
  def _info(self):
91
  features = datasets.Features(
92
  {
93
- "screenshot_path": datasets.Value("string"),
94
- "caption": datasets.Value("string"),
95
- # This is a JSON obj, but will be coded as a string
96
- "hierarchy": datasets.Value("string"),
97
  }
98
  )
99
 
100
  return datasets.DatasetInfo(
101
  description=_DESCRIPTION,
102
  features=features,
103
- supervised_keys=None,
104
  homepage=_HOMEPAGE,
105
  license=_LICENSE,
106
  citation=_CITATION,
@@ -108,22 +118,43 @@ class UIRefExp(datasets.GeneratorBasedBuilder):
108
 
109
  def _split_generators(self, dl_manager):
110
  """Returns SplitGenerators."""
111
- # TODO: This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
112
  # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
113
 
114
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
115
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
116
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
117
  my_urls = _DATA_URLs[self.config.name]
118
- data_dir = dl_manager.download_and_extract(my_urls)
119
  return [
120
  datasets.SplitGenerator(
121
  name=datasets.Split.TRAIN,
122
  # These kwargs will be passed to _generate_examples
123
  gen_kwargs={
124
  "root_dir": data_dir,
 
 
125
  "split": "train",
 
126
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
127
  )
128
  ]
129
 
 
12
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
  # See the License for the specific language governing permissions and
14
  # limitations under the License.
15
+ """Dataset script for UI Referring Expressions based on the UIBert RefExp dataset."""
16
 
17
 
18
  import csv
 
47
  # TODO: Add the licence for the dataset here if you can find it
48
  _LICENSE = "CC BY 4.0"
49
 
50
+ # Add link to the official dataset URLs here
51
  # The HuggingFace dataset library doesn't host the datasets but only points to the original files
52
  # This can be an arbitrary nested dict/list of URLs (see below in `_split_generators` method)
53
  _DATA_URLs = {
54
+ "ui_refexp": {
55
+ "images": "https://huggingface.co/datasets/ncoop57/rico_captions/resolve/main/captions_hierarchies_images.zip",
56
+ }
57
+ }
58
+
59
+ _METADATA_URLS = {
60
+ "ui_refexp": {
61
+ "train": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/train.tfrecord",
62
+ "validation": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/dev.tfrecord",
63
+ "test": "https://github.com/google-research-datasets/uibert/raw/main/ref_exp/test.tfrecord"
64
+ }
65
  }
66
 
67
 
68
  # TODO: Name of the dataset usually matches the script name with CamelCase instead of snake_case
69
  class UIRefExp(datasets.GeneratorBasedBuilder):
70
+ """Dataset with (image, question, answer) fields derive from UIBert RefExp."""
71
 
72
  VERSION = datasets.Version("1.1.0")
73
 
 
82
  # You will be able to load one or the other configurations in the following list with
83
  # data = datasets.load_dataset('my_dataset', 'first_domain')
84
  # data = datasets.load_dataset('my_dataset', 'second_domain')
85
+ BUILDER_CONFIGS = [
86
+ datasets.BuilderConfig(
87
+ name="ui_refexp",
88
+ version=VERSION,
89
+ description="Contains 66k+ unique UI screens. For each UI, we present a screenshot (JPG file) and the text shown on the screen that was extracted using an OCR model.",
90
+ )
91
+ # ,
92
  # # datasets.BuilderConfig(
93
  # # name="screenshots_captions_filtered",
94
  # # version=VERSION,
 
96
  # # ),
97
  # ]
98
 
99
+ DEFAULT_CONFIG_NAME = "screenshots_captions_filtered"
100
 
101
  def _info(self):
102
  features = datasets.Features(
103
  {
104
+ "screenshot": datasets.Image(),
105
+ "prompt": datasets.Value("string"), # click the search button next to menu drawer at the top of the screen
106
+ "target_bounding_box": dict, # [xmin, ymin, xmax, ymax], normalized screen reference values between 0 and 1
 
107
  }
108
  )
109
 
110
  return datasets.DatasetInfo(
111
  description=_DESCRIPTION,
112
  features=features,
113
+ supervised_keys=("screenshot","prompt", "target_bounding_box"),
114
  homepage=_HOMEPAGE,
115
  license=_LICENSE,
116
  citation=_CITATION,
 
118
 
119
  def _split_generators(self, dl_manager):
120
  """Returns SplitGenerators."""
121
+ # This method is tasked with downloading/extracting the data and defining the splits depending on the configuration
122
  # If several configurations are possible (listed in BUILDER_CONFIGS), the configuration selected by the user is in self.config.name
123
 
124
  # dl_manager is a datasets.download.DownloadManager that can be used to download and extract URLs
125
  # It can accept any type or nested list/dict and will give back the same structure with the url replaced with path to local files.
126
  # By default the archives will be extracted and a path to a cached folder where they are extracted is returned instead of the archive
127
  my_urls = _DATA_URLs[self.config.name]
128
+ image_archive = dl_manager.download(my_urls)
129
  return [
130
  datasets.SplitGenerator(
131
  name=datasets.Split.TRAIN,
132
  # These kwargs will be passed to _generate_examples
133
  gen_kwargs={
134
  "root_dir": data_dir,
135
+ "tfrecords_file": ,
136
+ "images": dl_manager.iter_archive(archive_path),
137
  "split": "train",
138
+
139
  },
140
+ ),
141
+ datasets.SplitGenerator(
142
+ name=datasets.Split.VALIDATION,
143
+ # These kwargs will be passed to _generate_examples
144
+ gen_kwargs={
145
+ "root_dir": data_dir,
146
+ "images": dl_manager.iter_archive(archive_path),
147
+ "split": "validation",
148
+ },
149
+ ),
150
+ datasets.SplitGenerator(
151
+ name=datasets.Split.TEST,
152
+ # These kwargs will be passed to _generate_examples
153
+ gen_kwargs={
154
+ "root_dir": data_dir,
155
+ "images": dl_manager.iter_archive(archive_path),
156
+ "split": "test",
157
+ },
158
  )
159
  ]
160