shunk031 committed on
Commit
0c44e73
1 Parent(s): 665d15f

initialize (#1)

Browse files

* add files

* update

* update

* update

* add CI

* update files

* update

* update

* update

* update

* update

* update

* update

.github/workflows/ci.yaml ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: CI
2
+
3
+ on:
4
+ push:
5
+ branches: [main]
6
+ pull_request:
7
+ branches: [main]
8
+ paths-ignore:
9
+ - "README.md"
10
+
11
+ jobs:
12
+ test:
13
+ runs-on: ubuntu-latest
14
+ strategy:
15
+ matrix:
16
+ python-version: ["3.9", "3.10"]
17
+
18
+ steps:
19
+ - uses: actions/checkout@v3
20
+
21
+ - name: Set up Python ${{ matrix.python-version }}
22
+ uses: actions/setup-python@v4
23
+ with:
24
+ python-version: ${{ matrix.python-version }}
25
+
26
+ - name: Install dependencies
27
+ run: |
28
+ pip install -U pip setuptools wheel poetry
29
+ poetry install
30
+
31
+ - name: Format
32
+ run: |
33
+ poetry run black --check .
34
+
35
+ - name: Lint
36
+ run: |
37
+ poetry run ruff .
38
+
39
+ - name: Type check
40
+ run: |
41
+ poetry run mypy . \
42
+ --ignore-missing-imports \
43
+ --no-strict-optional \
44
+ --no-site-packages \
45
+ --cache-dir=/dev/null
46
+
47
+ - name: Run tests
48
+ run: |
49
+ poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Sync to Hugging Face Hub
2
+
3
+ on:
4
+ workflow_run:
5
+ workflows:
6
+ - CI
7
+ branches:
8
+ - main
9
+ types:
10
+ - completed
11
+
12
+ jobs:
13
+ push_to_hub:
14
+ runs-on: ubuntu-latest
15
+
16
+ steps:
17
+ - name: Checkout repository
18
+ uses: actions/checkout@v3
19
+
20
+ - name: Push to Huggingface hub
21
+ env:
22
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
23
+ HF_USERNAME: ${{ secrets.HF_USERNAME }}
24
+ run: |
25
+ git fetch --unshallow
26
+ git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/Magazine main
.gitignore ADDED
@@ -0,0 +1,178 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Created by https://www.toptal.com/developers/gitignore/api/python
2
+ # Edit at https://www.toptal.com/developers/gitignore?templates=python
3
+
4
+ datasets
5
+
6
+ ### Python ###
7
+ # Byte-compiled / optimized / DLL files
8
+ __pycache__/
9
+ *.py[cod]
10
+ *$py.class
11
+
12
+ # C extensions
13
+ *.so
14
+
15
+ # Distribution / packaging
16
+ .Python
17
+ build/
18
+ develop-eggs/
19
+ dist/
20
+ downloads/
21
+ eggs/
22
+ .eggs/
23
+ lib/
24
+ lib64/
25
+ parts/
26
+ sdist/
27
+ var/
28
+ wheels/
29
+ share/python-wheels/
30
+ *.egg-info/
31
+ .installed.cfg
32
+ *.egg
33
+ MANIFEST
34
+
35
+ # PyInstaller
36
+ # Usually these files are written by a python script from a template
37
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
38
+ *.manifest
39
+ *.spec
40
+
41
+ # Installer logs
42
+ pip-log.txt
43
+ pip-delete-this-directory.txt
44
+
45
+ # Unit test / coverage reports
46
+ htmlcov/
47
+ .tox/
48
+ .nox/
49
+ .coverage
50
+ .coverage.*
51
+ .cache
52
+ nosetests.xml
53
+ coverage.xml
54
+ *.cover
55
+ *.py,cover
56
+ .hypothesis/
57
+ .pytest_cache/
58
+ cover/
59
+
60
+ # Translations
61
+ *.mo
62
+ *.pot
63
+
64
+ # Django stuff:
65
+ *.log
66
+ local_settings.py
67
+ db.sqlite3
68
+ db.sqlite3-journal
69
+
70
+ # Flask stuff:
71
+ instance/
72
+ .webassets-cache
73
+
74
+ # Scrapy stuff:
75
+ .scrapy
76
+
77
+ # Sphinx documentation
78
+ docs/_build/
79
+
80
+ # PyBuilder
81
+ .pybuilder/
82
+ target/
83
+
84
+ # Jupyter Notebook
85
+ .ipynb_checkpoints
86
+
87
+ # IPython
88
+ profile_default/
89
+ ipython_config.py
90
+
91
+ # pyenv
92
+ # For a library or package, you might want to ignore these files since the code is
93
+ # intended to run in multiple environments; otherwise, check them in:
94
+ .python-version
95
+
96
+ # pipenv
97
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
98
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
99
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
100
+ # install all needed dependencies.
101
+ #Pipfile.lock
102
+
103
+ # poetry
104
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
105
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
106
+ # commonly ignored for libraries.
107
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
108
+ #poetry.lock
109
+
110
+ # pdm
111
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
112
+ #pdm.lock
113
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
114
+ # in version control.
115
+ # https://pdm.fming.dev/#use-with-ide
116
+ .pdm.toml
117
+
118
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
119
+ __pypackages__/
120
+
121
+ # Celery stuff
122
+ celerybeat-schedule
123
+ celerybeat.pid
124
+
125
+ # SageMath parsed files
126
+ *.sage.py
127
+
128
+ # Environments
129
+ .env
130
+ .venv
131
+ env/
132
+ venv/
133
+ ENV/
134
+ env.bak/
135
+ venv.bak/
136
+
137
+ # Spyder project settings
138
+ .spyderproject
139
+ .spyproject
140
+
141
+ # Rope project settings
142
+ .ropeproject
143
+
144
+ # mkdocs documentation
145
+ /site
146
+
147
+ # mypy
148
+ .mypy_cache/
149
+ .dmypy.json
150
+ dmypy.json
151
+
152
+ # Pyre type checker
153
+ .pyre/
154
+
155
+ # pytype static type analyzer
156
+ .pytype/
157
+
158
+ # Cython debug symbols
159
+ cython_debug/
160
+
161
+ # PyCharm
162
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
163
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
164
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
165
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
166
+ #.idea/
167
+
168
+ ### Python Patch ###
169
+ # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
170
+ poetry.toml
171
+
172
+ # ruff
173
+ .ruff_cache/
174
+
175
+ # LSP config files
176
+ pyrightconfig.json
177
+
178
+ # End of https://www.toptal.com/developers/gitignore/api/python
Magazine.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import pathlib
3
+ import xml.etree.ElementTree as ET
4
+ from dataclasses import asdict, dataclass
5
+ from typing import Any, Dict, List, TypedDict
6
+
7
+ from PIL import Image
8
+ from PIL.Image import Image as PilImage
9
+
10
+ import datasets as ds
11
+ from datasets.utils.logging import get_logger
12
+
13
+ logger = get_logger(__name__)
14
+
15
+ JsonDict = Dict[str, Any]
16
+
17
+ _DESCRIPTION = "A large-scale magazine layout dataset with fine-grained layout annotations and keyword labeling"
18
+
19
+ _CITATION = """\
20
+ @article{zheng2019content,
21
+ title={Content-aware generative modeling of graphic design layouts},
22
+ author={Zheng, Xinru and Qiao, Xiaotian and Cao, Ying and Lau, Rynson WH},
23
+ journal={ACM Transactions on Graphics (TOG)},
24
+ volume={38},
25
+ number={4},
26
+ pages={1--15},
27
+ year={2019},
28
+ publisher={ACM New York, NY, USA}
29
+ }
30
+ """
31
+
32
+ _HOMEPAGE = "https://xtqiao.com/projects/content_aware_layout/"
33
+
34
+ _LICENSE = """\
35
+ Copyright (c) 2019, Xiaotian Qiao
36
+ All rights reserved.
37
+
38
+ This code is copyrighted by the authors and is for non-commercial research
39
+ purposes only.
40
+
41
+ Redistribution and use in source and binary forms, with or without
42
+ modification, are permitted provided that the following conditions are met:
43
+
44
+ * Redistributions of source code must retain the above copyright notice, this
45
+ list of conditions and the following disclaimer.
46
+
47
+ * Redistributions in binary form must reproduce the above copyright notice,
48
+ this list of conditions and the following disclaimer in the documentation
49
+ and/or other materials provided with the distribution.
50
+
51
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
52
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
53
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
54
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
55
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
56
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
57
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
58
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
59
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61
+ """
62
+
63
+
64
class URLs(TypedDict):
    """Pair of download URLs for the Magazine dataset archives."""

    image: str  # URL of the zipped magazine images (MagImage.zip)
    layout: str  # URL of the zipped layout annotation XMLs (MagLayout.zip)
67
+
68
+
69
+ _URLS: URLs = {
70
+ # The author of this loading script has uploaded the image and layout annotation files to the HuggingFace's private repository to facilitate testing.
71
+ # If you are using this loading script, please download the annotations from the appropriate channels, such as the OneDrive link provided by the Magazine dataset's author.
72
+ # (To the author of Magazine dataset, if there are any issues regarding this matter, please contact us. We will address it promptly.)
73
+ "image": "https://huggingface.co/datasets/shunk031/Magazine-private/resolve/main/MagImage.zip",
74
+ "layout": "https://huggingface.co/datasets/shunk031/Magazine-private/resolve/main/MagLayout.zip",
75
+ }
76
+
77
+
78
@dataclass
class LayoutSize:
    """Page size of a magazine layout, in pixels."""

    width: int
    height: int
82
+
83
+
84
@dataclass
class LayoutElement:
    """A single annotated region (text, image, headline, ...) of a magazine page.

    The region outline is stored as parallel lists of x and y vertex
    coordinates, parsed from the whitespace-separated strings found in the
    annotation XML's ``element`` attributes.
    """

    label: str
    # FIX: these were annotated List[int], but parse_polygon produces floats
    # (and the dataset features declare them as float32).
    polygon_x: List[float]
    polygon_y: List[float]

    @classmethod
    def parse_polygon(cls, polygon_str: str) -> List[float]:
        """Parse a whitespace-separated coordinate string into a list of floats."""
        return [float(coord) for coord in polygon_str.split()]

    @classmethod
    def parse_polygons(cls, json_dict: Dict[str, Any]) -> Dict[str, Any]:
        """Parse the ``polygon_x``/``polygon_y`` string fields in place and return the dict."""
        json_dict["polygon_x"] = cls.parse_polygon(json_dict["polygon_x"])
        json_dict["polygon_y"] = cls.parse_polygon(json_dict["polygon_y"])
        return json_dict

    @classmethod
    def from_dict(cls, json_dict: Dict[str, Any]) -> "LayoutElement":
        """Build a LayoutElement from an XML ``element`` tag's attribute dict."""
        return cls(**cls.parse_polygons(json_dict))
104
+
105
+
106
def get_filename(annotation: ET.Element) -> str:
    """Return the text of the ``filename`` tag of a layout annotation.

    Raises AssertionError if the tag is missing or empty — previously an
    empty ``<filename/>`` would return None despite the ``-> str`` annotation.
    """
    filename = annotation.find("filename")
    assert filename is not None
    assert filename.text is not None
    return filename.text
110
+
111
+
112
def get_layout_category(annotation: ET.Element) -> str:
    """Return the text of the ``category`` tag of a layout annotation.

    Raises AssertionError if the tag is missing or empty — previously an
    empty ``<category/>`` would return None despite the ``-> str`` annotation.
    """
    elem = annotation.find("category")
    assert elem is not None
    assert elem.text is not None
    return elem.text
116
+
117
+
118
def get_layout_size(annotation: ET.Element) -> LayoutSize:
    """Return the page size parsed from the ``size`` tag of a layout annotation.

    Raises AssertionError if the ``size``/``width``/``height`` tags are missing
    or empty — previously ``int(None)`` would raise an opaque TypeError when a
    tag had no text.
    """
    size = annotation.find("size")
    assert size is not None

    h_elem = size.find("height")
    assert h_elem is not None and h_elem.text is not None

    w_elem = size.find("width")
    assert w_elem is not None and w_elem.text is not None

    return LayoutSize(width=int(w_elem.text), height=int(h_elem.text))
129
+
130
+
131
def get_layout_elements(annotation: ET.Element) -> List[LayoutElement]:
    """Collect every ``element`` under the annotation's ``layout`` tag."""
    layout = annotation.find("layout")
    assert layout is not None

    return [
        LayoutElement.from_dict(element.attrib)
        for element in layout.findall("element")
    ]
138
+
139
+
140
def get_keywords(annotation: ET.Element) -> List[str]:
    """Return the text of every ``keyword`` under the annotation's ``text`` tag."""
    text_elem = annotation.find("text")
    assert text_elem is not None

    keywords = []
    for keyword in text_elem.findall("keyword"):
        keywords.append(keyword.text)
    return keywords
145
+
146
+
147
def load_image(file_path: pathlib.Path) -> PilImage:
    """Open the image file at *file_path* with PIL and return it."""
    return Image.open(file_path)
149
+
150
+
151
def load_images(
    image_base_dir: pathlib.Path, category: str, filename: str
) -> List[PilImage]:
    """Load every image in the category directory whose name starts with ``<filename>_``.

    BUG FIX: the glob pattern previously contained a literal placeholder
    instead of interpolating the ``filename`` argument, so ``filename`` was
    silently ignored and the wrong (or no) files were matched.
    """
    image_files = (image_base_dir / category).glob(f"{filename}_*")
    return [load_image(file_path) for file_path in image_files]
156
+
157
+
158
@dataclass
class LayoutAnnotation(object):
    """A fully-parsed Magazine layout annotation together with its page images."""

    filename: str
    category: str
    size: LayoutSize
    elements: List[LayoutElement]
    keywords: List[str]
    images: List[PilImage]

    @classmethod
    def get_annotation_from_xml(cls, xml_file: pathlib.Path) -> ET.Element:
        """Parse *xml_file* and return the root ``annotation`` element."""
        return ET.parse(xml_file).getroot()

    @classmethod
    def from_xml(
        cls, xml_file: pathlib.Path, image_base_dir: pathlib.Path
    ) -> "LayoutAnnotation":
        """Build a LayoutAnnotation from an annotation XML and its image directory."""
        annotation = cls.get_annotation_from_xml(xml_file)
        filename = get_filename(annotation=annotation)
        category = get_layout_category(annotation=annotation)
        return cls(
            filename=filename,
            category=category,
            size=get_layout_size(annotation=annotation),
            elements=get_layout_elements(annotation=annotation),
            keywords=get_keywords(annotation=annotation),
            images=load_images(
                image_base_dir=image_base_dir,
                category=category,
                filename=filename,
            ),
        )
205
+
206
+
207
class MagazineDataset(ds.GeneratorBasedBuilder):
    """HuggingFace dataset builder for the Magazine layout dataset."""

    VERSION = ds.Version("1.0.0")
    BUILDER_CONFIGS = [ds.BuilderConfig(version=VERSION, description=_DESCRIPTION)]

    @property
    def _manual_download_instructions(self) -> str:
        # Instructions surfaced to users who must download the archives by hand.
        return (
            "To use Magazine dataset, you need to download the annotations "
            "from the OneDrive in the official webpage "
            "(https://portland-my.sharepoint.com/:f:/g/personal/xqiao6-c_my_cityu_edu_hk/EhmRh5SFoQ9Hjl_aRjCOltkBKFYefiSagR6QLJ7pWvs3Ww?e=y8HO5Q)."
        )

    def _info(self) -> ds.DatasetInfo:
        """Declare the schema, description, citation, and license of the dataset."""
        features = ds.Features(
            {
                "filename": ds.Value("string"),
                # Six magazine genres defined by the original dataset.
                "category": ds.ClassLabel(
                    num_classes=6,
                    names=["fashion", "food", "news", "science", "travel", "wedding"],
                ),
                "size": {
                    "width": ds.Value("int64"),
                    "height": ds.Value("int64"),
                },
                # One entry per annotated layout region; polygons are stored as
                # parallel x/y coordinate sequences (see LayoutElement).
                "elements": ds.Sequence(
                    {
                        "label": ds.ClassLabel(
                            num_classes=5,
                            names=[
                                "text",
                                "image",
                                "headline",
                                "text-over-image",
                                "headline-over-image",
                            ],
                        ),
                        "polygon_x": ds.Sequence(ds.Value("float32")),
                        "polygon_y": ds.Sequence(ds.Value("float32")),
                    }
                ),
                "keywords": ds.Sequence(ds.Value("string")),
                "images": ds.Sequence(ds.Image()),
            }
        )
        return ds.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=features,
        )

    def _download_from_hf(self, dl_manager: ds.DownloadManager) -> URLs:
        # Download and extract the mirrored archives from the (private) HF repo.
        return dl_manager.download_and_extract(_URLS)

    def _download_from_local(self, dl_manager: ds.DownloadManager) -> URLs:
        """Extract manually downloaded archives found in the user-supplied manual_dir.

        Raises FileNotFoundError when the directory or either zip is missing.
        """
        assert dl_manager.manual_dir is not None, dl_manager.manual_dir
        dir_path = os.path.expanduser(dl_manager.manual_dir)

        image_zip_path = os.path.join(dir_path, "MagImage.zip")
        layout_zip_path = os.path.join(dir_path, "MagLayout.zip")

        if (
            not os.path.exists(dir_path)
            or not os.path.exists(image_zip_path)
            or not os.path.exists(layout_zip_path)
        ):
            raise FileNotFoundError(
                "Make sure you have downloaded and placed the `MagImage.zip` and `MagLayout.zip` correctly. "
                'Furthermore, you should check that a manual dir via `datasets.load_dataset("shunk031/Magazine", data_dir=...)` '
                "that includes zip files from the downloaded files. "
                f"Manual downloaded instructions: {self._manual_download_instructions}"
            )
        return dl_manager.extract(
            path_or_paths={
                "image": image_zip_path,
                "layout": layout_zip_path,
            }
        )

    def _split_generators(self, dl_manager: ds.DownloadManager):
        """Resolve archive locations and declare the dataset's splits.

        When an HF auth token is configured, pull the mirror from the Hub;
        otherwise fall back to archives the user placed in `data_dir`.
        """
        file_paths = (
            self._download_from_hf(dl_manager)
            if dl_manager.download_config.token
            else self._download_from_local(dl_manager)
        )
        layout_xml_dir = (
            pathlib.Path(file_paths["layout"]) / "layoutdata" / "annotations"
        )
        image_base_dir = pathlib.Path(file_paths["image"]) / "images"

        # The Magazine dataset ships a single (train) split.
        return [
            ds.SplitGenerator(
                name=ds.Split.TRAIN,
                gen_kwargs={
                    "layout_xml_dir": layout_xml_dir,
                    "image_base_dir": image_base_dir,
                },
            )
        ]

    def _generate_examples(
        self, layout_xml_dir: pathlib.Path, image_base_dir: pathlib.Path
    ):
        """Yield (index, example-dict) pairs, one per annotation XML file."""
        xml_files = [f for f in layout_xml_dir.iterdir() if f.suffix == ".xml"]
        for i, xml_file in enumerate(xml_files):
            layout_annotation = LayoutAnnotation.from_xml(
                xml_file=xml_file,
                image_base_dir=image_base_dir,
            )
            # asdict() recursively converts the dataclass (and nested
            # LayoutSize/LayoutElement instances) into plain dicts.
            yield i, asdict(layout_annotation)
README.md ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ annotations_creators:
3
+ - machine-generated
4
+ language:
5
+ - en
6
+ language_creators:
7
+ - found
8
+ license:
9
+ - unknown
10
+ multilinguality:
11
+ - monolingual
12
+ pretty_name: Magazine
13
+ size_categories: []
14
+ source_datasets:
15
+ - original
16
+ tags:
17
+ - graphic design
18
+ - layout
19
+ - content-aware
20
+ task_categories:
21
+ - image-to-image
22
+ - text-to-image
23
+ - unconditional-image-generation
24
+ task_ids: []
25
+ ---
26
+
27
+ # Dataset Card for Magazine dataset
28
+
29
+ ## Table of Contents
30
+ - [Dataset Card for Magazine dataset](#dataset-card-for-magazine-dataset)
31
+ - [Table of Contents](#table-of-contents)
32
+ - [Dataset Description](#dataset-description)
33
+ - [Dataset Summary](#dataset-summary)
34
+ - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
35
+ - [Languages](#languages)
36
+ - [Dataset Structure](#dataset-structure)
37
+ - [Data Instances](#data-instances)
38
+ - [Data Fields](#data-fields)
39
+ - [Data Splits](#data-splits)
40
+ - [Dataset Creation](#dataset-creation)
41
+ - [Curation Rationale](#curation-rationale)
42
+ - [Source Data](#source-data)
43
+ - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
44
+ - [Who are the source language producers?](#who-are-the-source-language-producers)
45
+ - [Annotations](#annotations)
46
+ - [Annotation process](#annotation-process)
47
+ - [Who are the annotators?](#who-are-the-annotators)
48
+ - [Personal and Sensitive Information](#personal-and-sensitive-information)
49
+ - [Considerations for Using the Data](#considerations-for-using-the-data)
50
+ - [Social Impact of Dataset](#social-impact-of-dataset)
51
+ - [Discussion of Biases](#discussion-of-biases)
52
+ - [Other Known Limitations](#other-known-limitations)
53
+ - [Additional Information](#additional-information)
54
+ - [Dataset Curators](#dataset-curators)
55
+ - [Licensing Information](#licensing-information)
56
+ - [Citation Information](#citation-information)
57
+ - [Contributions](#contributions)
58
+
59
+ ## Dataset Description
60
+
61
+ - **Homepage:** https://xtqiao.com/projects/content_aware_layout/
62
+ - **Repository:** https://github.com/shunk031/huggingface-datasets_Magazine
63
+ - **Paper (SIGGRAPH2019):** https://dl.acm.org/doi/10.1145/3306346.3322971
64
+
65
+ ### Dataset Summary
66
+
67
+ A large-scale magazine layout dataset with fine-grained layout annotations and keyword labeling.
68
+
69
+ ### Supported Tasks and Leaderboards
70
+
71
+ [More Information Needed]
72
+
73
+ ### Languages
74
+
75
+ [More Information Needed]
76
+
77
+ ## Dataset Structure
78
+
79
+ ### Data Instances
80
+
81
+ To use Magazine dataset, you need to download the image and layout annotations from the [OneDrive](https://portland-my.sharepoint.com/:f:/g/personal/xqiao6-c_my_cityu_edu_hk/EhmRh5SFoQ9Hjl_aRjCOltkBKFYefiSagR6QLJ7pWvs3Ww?e=y8HO5Q) in the official page.
82
+ Then place the downloaded files in the following structure and specify its path.
83
+
84
+ ```shell
85
+ /path/to/datasets
86
+ ├── MagImage.zip
87
+ └── MagLayout.zip
88
+ ```
89
+
90
+ ```python
91
+ import datasets as ds
92
+
93
+ dataset = ds.load_dataset(
94
+ path="shunk031/Magazine",
95
+ data_dir="/path/to/datasets/", # Specify the path of the downloaded directory.
96
+ )
97
+ ```
98
+
99
+ ### Data Fields
100
+
101
+ [More Information Needed]
102
+
103
+ ### Data Splits
104
+
105
+ [More Information Needed]
106
+
107
+ ## Dataset Creation
108
+
109
+ ### Curation Rationale
110
+
111
+ [More Information Needed]
112
+
113
+ ### Source Data
114
+
115
+ [More Information Needed]
116
+
117
+ #### Initial Data Collection and Normalization
118
+
119
+ [More Information Needed]
120
+
121
+ #### Who are the source language producers?
122
+
123
+ [More Information Needed]
124
+
125
+ ### Annotations
126
+
127
+ [More Information Needed]
128
+
129
+ #### Annotation process
130
+
131
+ [More Information Needed]
132
+
133
+ #### Who are the annotators?
134
+
135
+ [More Information Needed]
136
+
137
+ ### Personal and Sensitive Information
138
+
139
+ [More Information Needed]
140
+
141
+ ## Considerations for Using the Data
142
+
143
+ ### Social Impact of Dataset
144
+
145
+ [More Information Needed]
146
+
147
+ ### Discussion of Biases
148
+
149
+ [More Information Needed]
150
+
151
+ ### Other Known Limitations
152
+
153
+ [More Information Needed]
154
+
155
+ ## Additional Information
156
+
157
+ ### Dataset Curators
158
+
159
+ [More Information Needed]
160
+
161
+ ### Licensing Information
162
+
163
+ [More Information Needed]
164
+
165
+ ### Citation Information
166
+
167
+ ```bibtex
168
+ @article{zheng2019content,
169
+ title={Content-aware generative modeling of graphic design layouts},
170
+ author={Zheng, Xinru and Qiao, Xiaotian and Cao, Ying and Lau, Rynson WH},
171
+ journal={ACM Transactions on Graphics (TOG)},
172
+ volume={38},
173
+ number={4},
174
+ pages={1--15},
175
+ year={2019},
176
+ publisher={ACM New York, NY, USA}
177
+ }
178
+ ```
179
+
180
+ ### Contributions
181
+
182
+ Thanks to [Xinru Zheng and Xiaotian Qiao](https://xtqiao.com/projects/content_aware_layout/) for creating this dataset.
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.poetry]
2
+ name = "huggingface-datasets-magazine"
3
+ version = "0.1.0"
4
+ description = ""
5
+ authors = ["Shunsuke KITADA <shunsuke.kitada.0831@gmail.com>"]
6
+ readme = "README.md"
7
+
8
+ [tool.poetry.dependencies]
9
+ python = "^3.9"
10
+ datasets = {extras = ["vision"], version = "^2.14.6"}
11
+
12
+
13
+ [tool.poetry.group.dev.dependencies]
14
+ ruff = "^0.1.3"
15
+ black = "^23.10.1"
16
+ mypy = "^1.6.1"
17
+ pytest = "^7.4.3"
18
+
19
+ [build-system]
20
+ requires = ["poetry-core"]
21
+ build-backend = "poetry.core.masonry.api"
tests/Magazine_test.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+
3
+ import pytest
4
+
5
+ import datasets as ds
6
+
7
+
8
@pytest.fixture
def dataset_path() -> str:
    """Path to the dataset loading script under test."""
    return "Magazine.py"
11
+
12
+
13
@pytest.mark.skipif(
    condition=bool(os.environ.get("CI", False)),
    reason=(
        "Because this loading script downloads a large dataset, "
        "we will skip running it on CI."
    ),
)
@pytest.mark.parametrize(
    argnames=("expected_num_dataset",),
    argvalues=((3919,),),
)
def test_load_dataset(dataset_path: str, expected_num_dataset: int):
    """Smoke-test that the loading script builds the expected number of rows."""
    # token=True makes the script pull the mirrored archives from the
    # private HF repository instead of requiring a manual download.
    dataset = ds.load_dataset(path=dataset_path, token=True)

    assert dataset["train"].num_rows == expected_num_dataset
tests/__init__.py ADDED
File without changes