shunk031 committed
Commit faf2154
1 Parent(s): 679f627

Initialize (#1)


* add files

* update poetry files

* update

* update README.md

* add settings for CI

* update

.github/workflows/ci.yaml ADDED
@@ -0,0 +1,49 @@
+ name: CI
+
+ on:
+   push:
+     branches: [main]
+   pull_request:
+     branches: [main]
+     paths-ignore:
+       - "README.md"
+
+ jobs:
+   test:
+     runs-on: ubuntu-latest
+     strategy:
+       matrix:
+         python-version: ["3.9", "3.10"]
+
+     steps:
+       - uses: actions/checkout@v3
+
+       - name: Set up Python ${{ matrix.python-version }}
+         uses: actions/setup-python@v4
+         with:
+           python-version: ${{ matrix.python-version }}
+
+       - name: Install dependencies
+         run: |
+           pip install -U pip setuptools wheel poetry
+           poetry install
+
+       - name: Format
+         run: |
+           poetry run black --check .
+
+       - name: Lint
+         run: |
+           poetry run ruff .
+
+       - name: Type check
+         run: |
+           poetry run mypy . \
+             --ignore-missing-imports \
+             --no-strict-optional \
+             --no-site-packages \
+             --cache-dir=/dev/null
+
+       - name: Run tests
+         run: |
+           poetry run pytest --color=yes -rf
.github/workflows/push_to_hub.yaml ADDED
@@ -0,0 +1,26 @@
+ name: Sync to Hugging Face Hub
+
+ on:
+   workflow_run:
+     workflows:
+       - CI
+     branches:
+       - main
+     types:
+       - completed
+
+ jobs:
+   push_to_hub:
+     runs-on: ubuntu-latest
+
+     steps:
+       - name: Checkout repository
+         uses: actions/checkout@v3
+
+       - name: Push to Hugging Face Hub
+         env:
+           HF_TOKEN: ${{ secrets.HF_TOKEN }}
+           HF_USERNAME: ${{ secrets.HF_USERNAME }}
+         run: |
+           git fetch --unshallow
+           git push --force https://${HF_USERNAME}:${HF_TOKEN}@huggingface.co/datasets/${HF_USERNAME}/CGL-Dataset-v2 main
.gitignore ADDED
@@ -0,0 +1,178 @@
+ # Created by https://www.toptal.com/developers/gitignore/api/python
+ # Edit at https://www.toptal.com/developers/gitignore?templates=python
+
+ *.tar.gz
+
+ ### Python ###
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # PyCharm
+ # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+ # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+ # and can be added to the global gitignore or merged into this file. For a more nuclear
+ # option (not recommended) you can uncomment the following to ignore the entire idea folder.
+ #.idea/
+
+ ### Python Patch ###
+ # Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
+ poetry.toml
+
+ # ruff
+ .ruff_cache/
+
+ # LSP config files
+ pyrightconfig.json
+
+ # End of https://www.toptal.com/developers/gitignore/api/python
CGL-Dataset-v2.py ADDED
@@ -0,0 +1,605 @@
+ import ast
+ import json
+ import os
+ import pathlib
+ from collections import defaultdict
+ from dataclasses import asdict, dataclass
+ from typing import Any, Dict, List, Optional, Tuple, TypedDict, Union
+
+ import datasets as ds
+ import numpy as np
+ import torch
+ from datasets.utils.logging import get_logger
+ from PIL import Image
+ from PIL.Image import Image as PilImage
+ from pycocotools import mask as cocomask
+ from tqdm import tqdm
+
+ logger = get_logger(__name__)
+
+ JsonDict = Dict[str, Any]
+ ImageId = int
+ CategoryId = int
+ AnnotationId = int
+ Bbox = Tuple[float, float, float, float]
+
+
+ _DESCRIPTION = """\
+ CGL-Dataset V2 is a dataset for the task of automatic graphic layout design of advertising posters, containing 60,548 training samples and 1,035 testing samples. It is an extension of CGL-Dataset.
+ """
+
+ _CITATION = """\
+ @inproceedings{li2023relation,
+   title={Relation-Aware Diffusion Model for Controllable Poster Layout Generation},
+   author={Li, Fengheng and Liu, An and Feng, Wei and Zhu, Honghe and Li, Yaoyu and Zhang, Zheng and Lv, Jingjing and Zhu, Xin and Shen, Junjie and Lin, Zhangang},
+   booktitle={Proceedings of the 32nd ACM international conference on information & knowledge management},
+   pages={1249--1258},
+   year={2023}
+ }
+ """
+
+ _HOMEPAGE = "https://github.com/liuan0803/RADM"
+
+ _LICENSE = """\
+ Unknown
+ """
+
+
+ class UncompressedRLE(TypedDict):
+     counts: List[int]
+     size: Tuple[int, int]
+
+
+ class CompressedRLE(TypedDict):
+     counts: bytes
+     size: Tuple[int, int]
+
+
+ @dataclass
+ class ImageData(object):
+     image_id: ImageId
+     file_name: str
+     width: int
+     height: int
+
+     @classmethod
+     def from_dict(cls, json_dict: JsonDict) -> "ImageData":
+         return cls(
+             image_id=json_dict["id"],
+             file_name=json_dict["file_name"],
+             width=json_dict["width"],
+             height=json_dict["height"],
+         )
+
+     @property
+     def shape(self) -> Tuple[int, int]:
+         return (self.height, self.width)
+
+
+ @dataclass
+ class CategoryData(object):
+     category_id: int
+     name: str
+     supercategory: str
+
+     @classmethod
+     def from_dict(cls, json_dict: JsonDict) -> "CategoryData":
+         return cls(
+             category_id=json_dict["id"],
+             name=json_dict["name"],
+             supercategory=json_dict["supercategory"],
+         )
+
+
+ @dataclass
+ class VisualAnnotationData(object):
+     annotation_id: AnnotationId
+     image_id: ImageId
+     segmentation: Union[np.ndarray, CompressedRLE]
+     area: float
+     iscrowd: bool
+     bbox: Bbox
+     category_id: int
+
+     @classmethod
+     def compress_rle(
+         cls,
+         segmentation: Union[List[List[float]], UncompressedRLE],
+         iscrowd: bool,
+         height: int,
+         width: int,
+     ) -> CompressedRLE:
+         if iscrowd:
+             rle = cocomask.frPyObjects(segmentation, h=height, w=width)
+         else:
+             rles = cocomask.frPyObjects(segmentation, h=height, w=width)
+             rle = cocomask.merge(rles)  # type: ignore
+
+         return rle  # type: ignore
+
+     @classmethod
+     def rle_segmentation_to_binary_mask(
+         cls, segmentation, iscrowd: bool, height: int, width: int
+     ) -> np.ndarray:
+         rle = cls.compress_rle(
+             segmentation=segmentation, iscrowd=iscrowd, height=height, width=width
+         )
+         return cocomask.decode(rle)  # type: ignore
+
+     @classmethod
+     def rle_segmentation_to_mask(
+         cls,
+         segmentation: Union[List[List[float]], UncompressedRLE],
+         iscrowd: bool,
+         height: int,
+         width: int,
+     ) -> np.ndarray:
+         binary_mask = cls.rle_segmentation_to_binary_mask(
+             segmentation=segmentation, iscrowd=iscrowd, height=height, width=width
+         )
+         return binary_mask * 255
+
+     @classmethod
+     def from_dict(
+         cls,
+         json_dict: JsonDict,
+         images: Dict[ImageId, ImageData],
+         decode_rle: bool,
+     ) -> "VisualAnnotationData":
+         segmentation = json_dict["segmentation"]
+         image_id = json_dict["image_id"]
+         image_data = images[image_id]
+         iscrowd = bool(json_dict["iscrowd"])
+
+         segmentation_mask = (
+             cls.rle_segmentation_to_mask(
+                 segmentation=segmentation,
+                 iscrowd=iscrowd,
+                 height=image_data.height,
+                 width=image_data.width,
+             )
+             if decode_rle
+             else cls.compress_rle(
+                 segmentation=segmentation,
+                 iscrowd=iscrowd,
+                 height=image_data.height,
+                 width=image_data.width,
+             )
+         )
+         return cls(
+             annotation_id=json_dict["id"],
+             image_id=image_id,
+             segmentation=segmentation_mask,  # type: ignore
+             area=json_dict["area"],
+             iscrowd=iscrowd,
+             bbox=json_dict["bbox"],
+             category_id=json_dict["category_id"],
+         )
+
+
+ @dataclass
+ class UserSelectedValue(object):
+     name: str
+
+
+ @dataclass
+ class Point(object):
+     x: int
+     y: int
+
+
+ @dataclass
+ class TextData(object):
+     user_selected_value: UserSelectedValue
+     category_description: str
+     points: List[Point]
+
+     @classmethod
+     def from_dict(cls, json_dict: JsonDict) -> "TextData":
+         return cls(
+             user_selected_value=UserSelectedValue(**json_dict["userSelectedValue"]),
+             points=[Point(**p) for p in json_dict["points"]],
+             category_description=json_dict["categoryDesc"],
+         )
+
+
+ class TextAnnotationData(object):
+     @classmethod
+     def from_dict(cls, *args, **kwargs):
+         raise NotImplementedError
+
+
+ @dataclass
+ class TextAnnotationTrainData(TextAnnotationData):
+     is_sample: bool
+     image: str
+     rotate: float
+     data: List[TextData]
+     pin: str
+
+     @property
+     def id_images(self) -> int:
+         # 'ali_anno_1/22.png' -> ['ali_anno_1', '22.png']
+         _, id_filename = self.image.split("/")
+         # 22.png -> ['22', '.png']
+         root, _ = os.path.splitext(id_filename)
+         return int(root)
+
+     @classmethod
+     def from_dict(cls, json_dict: JsonDict) -> "TextAnnotationTrainData":
+         text_data = [TextData.from_dict(d) for d in json_dict["data"]]
+         return cls(
+             is_sample=bool(int(json_dict["isSample"])),
+             image=json_dict["image"],
+             rotate=json_dict["rotate"],
+             pin=json_dict["pin"],
+             data=text_data,
+         )
+
+
+ @dataclass
+ class TextAnnotationTestData(TextAnnotationData):
+     image_filename: str
+     product_detail_highlighted_word: Optional[List[str]] = None
+     blc_text: Optional[List[str]] = None
+     adv_sellpoint: Optional[List[str]] = None
+
+     @classmethod
+     def from_dict(
+         cls, image_filename: str, json_dict: JsonDict
+     ) -> "TextAnnotationTestData":
+         return cls(
+             image_filename=image_filename,
+             product_detail_highlighted_word=json_dict.get("productDetailHighlightWord"),
+             blc_text=json_dict.get("blc_text"),
+             adv_sellpoint=json_dict.get("adv_sellpoint"),
+         )
+
+
+ @dataclass
+ class TextFeatureData(object):
+     feats: List[torch.Tensor]
+     num: Optional[int] = None
+     pos: Optional[List[Tuple[int, int, int, int]]] = None
+
+     def __post_init__(self):
+         if self.num is None:
+             self.num = len(self.feats)
+
+         assert self.num == len(self.feats)
+
+         if self.pos:
+             assert self.num == len(self.pos) == len(self.feats)
+
+
+ def load_json(json_path: pathlib.Path) -> JsonDict:
+     logger.info(f"Load from {json_path}")
+     with json_path.open("r") as rf:
+         json_dict = json.load(rf)
+     return json_dict
+
+
+ def load_image(image_path: pathlib.Path) -> PilImage:
+     logger.info(f"Load from {image_path}")
+     return Image.open(image_path)
+
+
+ def load_images_data(
+     image_dicts: List[JsonDict],
+     tqdm_desc="Load images",
+ ) -> Dict[ImageId, ImageData]:
+     images = {}
+     for image_dict in tqdm(image_dicts, desc=tqdm_desc):
+         image_data = ImageData.from_dict(image_dict)
+         images[image_data.image_id] = image_data
+     return images
+
+
+ def load_categories_data(
+     category_dicts: List[JsonDict],
+     tqdm_desc: str = "Load categories",
+ ) -> Dict[CategoryId, CategoryData]:
+     categories = {}
+     for category_dict in tqdm(category_dicts, desc=tqdm_desc):
+         category_data = CategoryData.from_dict(category_dict)
+         categories[category_data.category_id] = category_data
+     return categories
+
+
+ def _load_train_texts_data(
+     txt_path: pathlib.Path,
+     image_dicts: List[JsonDict],
+     tqdm_desc: str = "Load text annotations for training",
+ ) -> Dict[ImageId, TextAnnotationTrainData]:
+     assert txt_path.stem == "train", txt_path
+
+     texts: Dict[ImageId, TextAnnotationTrainData] = {}
+     with txt_path.open("r") as rf:
+         for line in tqdm(rf, desc=tqdm_desc):
+             text_dict = ast.literal_eval(line)
+             text_data_ann = TextAnnotationTrainData.from_dict(text_dict)
+             image_dict = image_dicts[text_data_ann.id_images]
+             image_id = image_dict["id"]
+
+             if image_id in texts:
+                 raise ValueError(f"Duplicate image id: {image_id}")
+
+             texts[image_id] = text_data_ann
+     return texts
+
+
+ def _load_test_texts_data(
+     txt_path: pathlib.Path,
+     images: Dict[ImageId, ImageData],
+     tqdm_desc: str = "Load text annotations for test",
+ ) -> Dict[ImageId, TextAnnotationTestData]:
+     assert txt_path.stem == "test", txt_path
+     images_dict = {image.file_name: image for image in images.values()}
+
+     texts = {}
+     with txt_path.open("r") as rf:
+         for line in tqdm(rf, desc=tqdm_desc):
+             image_filename, json_str = line.split("\t")
+             text_dict = ast.literal_eval(json_str)
+             text_data_ann = TextAnnotationTestData.from_dict(image_filename, text_dict)
+
+             image_id = images_dict[image_filename].image_id
+             if image_id in texts:
+                 raise ValueError(f"Duplicate image id: {image_id}")
+
+             texts[image_id] = text_data_ann
+     return texts
+
+
+ def load_texts_data(
+     txt_path: pathlib.Path,
+     image_dicts: List[JsonDict],
+     images: Dict[ImageId, ImageData],
+ ):
+     if txt_path.stem == "train":
+         return _load_train_texts_data(
+             txt_path=txt_path,
+             image_dicts=image_dicts,
+         )
+     elif txt_path.stem == "test":
+         return _load_test_texts_data(
+             txt_path=txt_path,
+             images=images,
+         )
+     else:
+         raise ValueError(f"Unknown text file: {txt_path}")
+
+
+ def load_annotation_data(
+     label_dicts: List[JsonDict],
+     images: Dict[ImageId, ImageData],
+     decode_rle: bool,
+     tqdm_desc: str = "Load annotation data",
+ ) -> Dict[ImageId, List[VisualAnnotationData]]:
+     labels = defaultdict(list)
+     label_dicts = sorted(label_dicts, key=lambda d: d["image_id"])
+
+     for label_dict in tqdm(label_dicts, desc=tqdm_desc):
+         label_data = VisualAnnotationData.from_dict(
+             label_dict, images=images, decode_rle=decode_rle
+         )
+         labels[label_data.image_id].append(label_data)
+     return labels
+
+
+ def load_text_features(
+     txt_feature_dir: pathlib.Path,
+     image_dicts: List[JsonDict],
+     tqdm_desc="Load text features",
+ ) -> Dict[ImageId, TextFeatureData]:
+     text_features = {}
+     for image_dict in tqdm(image_dicts, desc=tqdm_desc):
+         image_filename = image_dict["file_name"]
+         root, _ = os.path.splitext(image_filename)
+         txt_feature_path = txt_feature_dir / f"{root}_feats.pth"
+
+         if not txt_feature_path.exists():
+             # logger.warning(f"Text feature file not found: {txt_feature_path}")
+             continue
+
+         txt_feature_dict = torch.load(
+             txt_feature_path, map_location=torch.device("cpu")
+         )
+         txt_feature_data = TextFeatureData(**txt_feature_dict)
+
+         image_id = image_dict["id"]
+         if image_id in text_features:
+             raise ValueError(f"Duplicate image id: {image_id}")
+
+         text_features[image_id] = txt_feature_data
+     return text_features
+
+
+ @dataclass
+ class CGLDatasetV2Config(ds.BuilderConfig):
+     decode_rle: bool = False
+     include_text_features: bool = False
+
+
+ class CGLDatasetV2(ds.GeneratorBasedBuilder):
+     VERSION = ds.Version("1.0.0")
+     BUILDER_CONFIG_CLASS = CGLDatasetV2Config
+     BUILDER_CONFIGS = [
+         CGLDatasetV2Config(version=VERSION, description=_DESCRIPTION),
+     ]
+
+     def _info(self) -> ds.DatasetInfo:
+         segmentation_feature = (
+             ds.Image()
+             if self.config.decode_rle  # type: ignore
+             else {
+                 "counts": ds.Value("binary"),
+                 "size": ds.Sequence(ds.Value("int32")),
+             }
+         )
+         features = ds.Features(
+             {
+                 "image_id": ds.Value("int64"),
+                 "file_name": ds.Value("string"),
+                 "width": ds.Value("int64"),
+                 "height": ds.Value("int64"),
+                 "image": ds.Image(),
+                 "annotations": ds.Sequence(
+                     {
+                         "annotation_id": ds.Value("int64"),
+                         "area": ds.Value("int64"),
+                         "bbox": ds.Sequence(ds.Value("int64")),
+                         "category": {
+                             "category_id": ds.Value("int64"),
+                             "name": ds.Value("string"),
+                             "supercategory": ds.Value("string"),
+                         },
+                         "category_id": ds.Value("int64"),
+                         "image_id": ds.Value("int64"),
+                         "iscrowd": ds.Value("bool"),
+                         "segmentation": segmentation_feature,
+                     }
+                 ),
+                 "text_annotation": {
+                     "is_sample": ds.Value("bool"),
+                     "image": ds.Value("string"),
+                     "rotate": ds.Value("float32"),
+                     "pin": ds.Value("string"),
+                     "data": ds.Sequence(
+                         {
+                             "category_description": ds.Value("string"),
+                             "points": ds.Sequence(
+                                 {"x": ds.Value("int64"), "y": ds.Value("int64")}
+                             ),
+                             "user_selected_value": {"name": ds.Value("string")},
+                         }
+                     ),
+                     "product_detail_highlighted_word": ds.Sequence(ds.Value("string")),
+                     "blc_text": ds.Sequence(ds.Value("string")),
+                     "adv_sellpoint": ds.Sequence(ds.Value("string")),
+                 },
+                 "text_feature": {
+                     "num": ds.Value("int64"),
+                     "pos": ds.Sequence(ds.Sequence(ds.Value("int64"))),
+                     "feats": ds.Sequence(ds.Sequence(ds.Sequence(ds.Value("float32")))),
+                 },
+             }
+         )
+         return ds.DatasetInfo(
+             description=_DESCRIPTION,
+             citation=_CITATION,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             features=features,
+         )
+
+     def _split_generators(
+         self, dl_manager: ds.DownloadManager
+     ) -> List[ds.SplitGenerator]:
+         assert dl_manager.manual_dir is not None
+         base_dir_path = os.path.expanduser(dl_manager.manual_dir)
+
+         if not os.path.exists(base_dir_path):
+             raise FileNotFoundError(base_dir_path)
+
+         base_dir_path = dl_manager.extract(base_dir_path)
+         assert isinstance(base_dir_path, str)
+
+         dir_path = pathlib.Path(base_dir_path) / "RADM_dataset"
+
+         ann_dir = dir_path / "annotations"
+         img_dir = dir_path / "images"
+         txt_dir = dir_path / "texts"
+         txt_feature_dir = dir_path / "text_features"
+
+         tng_ann_json_path = ann_dir / "train.json"
+         tst_ann_json_path = ann_dir / "test.json"
+
+         tng_img_dir = img_dir / "train"
+         tst_img_dir = img_dir / "test"
+
+         tng_img_json_path = tng_img_dir / "train.json"
+         tst_img_json_path = tst_img_dir / "test.json"
+
+         tng_txt_path = txt_dir / "train.txt"
+         tst_txt_path = txt_dir / "test.txt"
+
+         tng_txt_feature_dir = txt_feature_dir / "train"
+         tst_txt_feature_dir = txt_feature_dir / "test"
+
+         return [
+             ds.SplitGenerator(
+                 name=ds.Split.TRAIN,  # type: ignore
+                 gen_kwargs={
+                     "ann_json_path": tng_ann_json_path,
+                     "img_dir": tng_img_dir,
+                     "img_json_path": tng_img_json_path,
+                     "txt_path": tng_txt_path,
+                     "txt_feature_dir": tng_txt_feature_dir,
+                 },
+             ),
+             ds.SplitGenerator(
+                 name=ds.Split.TEST,  # type: ignore
+                 gen_kwargs={
+                     "ann_json_path": tst_ann_json_path,
+                     "img_dir": tst_img_dir,
+                     "img_json_path": tst_img_json_path,
+                     "txt_path": tst_txt_path,
+                     "txt_feature_dir": tst_txt_feature_dir,
+                 },
+             ),
+         ]
+
+     def _generate_examples(
+         self,
+         ann_json_path: pathlib.Path,
+         img_dir: pathlib.Path,
+         img_json_path: pathlib.Path,
+         txt_path: pathlib.Path,
+         txt_feature_dir: pathlib.Path,
+     ):
+         ann_json = load_json(ann_json_path)
+         images = load_images_data(image_dicts=ann_json["images"])
+         categories = load_categories_data(category_dicts=ann_json["categories"])
+
+         texts = load_texts_data(
+             txt_path=txt_path, image_dicts=ann_json["images"], images=images
+         )
+
+         text_features = (
+             load_text_features(
+                 txt_feature_dir=txt_feature_dir, image_dicts=ann_json["images"]
+             )
+             if self.config.include_text_features  # type: ignore
+             else None
+         )
+
+         annotations = load_annotation_data(
+             label_dicts=ann_json["annotations"],
+             images=images,
+             decode_rle=self.config.decode_rle,  # type: ignore
+         )
+
+         for idx, image_id in enumerate(images.keys()):
+             image_data = images[image_id]
+             image_anns = annotations[image_id]
+
+             image = load_image(image_path=img_dir / image_data.file_name)
+             example = asdict(image_data)
+             example["image"] = image
+
+             example["annotations"] = []
+             for ann in image_anns:
+                 ann_dict = asdict(ann)
+                 category = categories[ann.category_id]
+                 ann_dict["category"] = asdict(category)
+                 example["annotations"].append(ann_dict)
+
+             text_data = texts.get(image_id)
+             example["text_annotation"] = asdict(text_data) if text_data else None  # type: ignore
+
+             if text_features:
+                 text_feature = text_features.get(image_id)
+                 example["text_feature"] = asdict(text_feature) if text_feature else None
+
+             yield idx, example
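
The `VisualAnnotationData` helpers above either keep segmentations as COCO compressed RLE or decode them to mask images. A minimal standalone sketch of that round trip using `pycocotools` directly (the polygon and image size here are made up for illustration, not taken from the dataset):

```python
import numpy as np
from pycocotools import mask as cocomask

# A hypothetical polygon segmentation (x, y pairs) on a 100x100 image,
# mirroring the `iscrowd == False` branch of `compress_rle`.
segmentation = [[10.0, 10.0, 60.0, 10.0, 60.0, 40.0, 10.0, 40.0]]
height, width = 100, 100

rles = cocomask.frPyObjects(segmentation, height, width)
rle = cocomask.merge(rles)  # compressed RLE: {"counts": bytes, "size": [h, w]}

binary_mask = cocomask.decode(rle)  # np.ndarray of 0/1 with shape (100, 100)
print(binary_mask.sum())  # number of foreground pixels in the rectangle

# `rle_segmentation_to_mask` scales this to 0/255 so it can be stored as an image.
mask_image = binary_mask * 255
print(mask_image.dtype, np.unique(mask_image))
```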
README.md ADDED
@@ -0,0 +1,267 @@
+ ---
+ annotations_creators:
+ - crowdsourced
+ language:
+ - zh
+ language_creators:
+ - found
+ license:
+ - unknown
+ multilinguality:
+ - monolingual
+ pretty_name: CGL-Dataset v2
+ size_categories: []
+ source_datasets:
+ - CGL-Dataset
+ tags:
+ - graphic design
+ task_categories:
+ - other
+ task_ids: []
+ ---
+
+ # Dataset Card for CGL-Dataset-v2
+
+ [![CI](https://github.com/shunk031/huggingface-datasets_CGL-Dataset-v2/actions/workflows/ci.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_CGL-Dataset-v2/actions/workflows/ci.yaml)
+ [![Sync HF](https://github.com/shunk031/huggingface-datasets_CGL-Dataset-v2/actions/workflows/push_to_hub.yaml/badge.svg)](https://github.com/shunk031/huggingface-datasets_CGL-Dataset-v2/actions/workflows/push_to_hub.yaml)
+
+ ## Table of Contents
+ - [Dataset Card for CGL-Dataset-v2](#dataset-card-for-cgl-dataset-v2)
+   - [Table of Contents](#table-of-contents)
+   - [Dataset Description](#dataset-description)
+     - [Dataset Summary](#dataset-summary)
+     - [Supported Tasks and Leaderboards](#supported-tasks-and-leaderboards)
+     - [Languages](#languages)
+   - [Dataset Structure](#dataset-structure)
+     - [Data Instances](#data-instances)
+     - [Data Fields](#data-fields)
+     - [Data Splits](#data-splits)
+   - [Dataset Creation](#dataset-creation)
+     - [Curation Rationale](#curation-rationale)
+     - [Source Data](#source-data)
+       - [Initial Data Collection and Normalization](#initial-data-collection-and-normalization)
+       - [Who are the source language producers?](#who-are-the-source-language-producers)
+     - [Annotations](#annotations)
+       - [Annotation process](#annotation-process)
+       - [Who are the annotators?](#who-are-the-annotators)
+     - [Personal and Sensitive Information](#personal-and-sensitive-information)
+   - [Considerations for Using the Data](#considerations-for-using-the-data)
+     - [Social Impact of Dataset](#social-impact-of-dataset)
+     - [Discussion of Biases](#discussion-of-biases)
+     - [Other Known Limitations](#other-known-limitations)
+   - [Additional Information](#additional-information)
+     - [Dataset Curators](#dataset-curators)
+     - [Licensing Information](#licensing-information)
+     - [Citation Information](#citation-information)
+     - [Contributions](#contributions)
+
+ ## Dataset Description
+
+ - **Homepage:** https://github.com/liuan0803/RADM
+ - **Repository:** https://github.com/shunk031/huggingface-datasets_CGL-Dataset-v2
+ - **Paper (Preprint):** https://arxiv.org/abs/2306.09086
+ - **Paper (CIKM'23):** https://dl.acm.org/doi/10.1145/3583780.3615028
+
+ ### Dataset Summary
+
+ CGL-Dataset V2 is a dataset for the task of automatic graphic layout design of advertising posters, containing 60,548 training samples and 1,035 testing samples. It is an extension of CGL-Dataset.
+
+ ### Supported Tasks and Leaderboards
+
+ [More Information Needed]
+
+ <!-- For each of the tasks tagged for this dataset, give a brief description of the tag, metrics, and suggested models (with a link to their HuggingFace implementation if available). Give a similar description of tasks that were not covered by the structured tag set (replace the `task-category-tag` with an appropriate `other:other-task-name`).
+
+ - `task-category-tag`: The dataset can be used to train a model for [TASK NAME], which consists in [TASK DESCRIPTION]. Success on this task is typically measured by achieving a *high/low* [metric name](https://huggingface.co/metrics/metric_name). The ([model name](https://huggingface.co/model_name) or [model class](https://huggingface.co/transformers/model_doc/model_class.html)) model currently achieves the following score. *[IF A LEADERBOARD IS AVAILABLE]:* This task has an active leaderboard which can be found at [leaderboard url]() and ranks models based on [metric name](https://huggingface.co/metrics/metric_name) while also reporting [other metric name](https://huggingface.co/metrics/other_metric_name). -->
+
+ ### Languages
+
+ The language data in CGL-Dataset v2 is in Chinese ([BCP-47 zh](https://www.rfc-editor.org/info/bcp47)).
+
+ ## Dataset Structure
+
+ ### Data Instances
+
+ To use the CGL-Dataset v2 dataset, you need to download `RADM_dataset.tar.gz`, which includes the poster images, texts, and text features, via [JD Cloud](https://3.cn/10-dQKDKG) or [Google Drive](https://drive.google.com/file/d/1ezOzR7MX3MFFIfWgJmmEaqXn3iDFp2si/view?usp=sharing).
+ Then place the downloaded file in the following structure and specify its path.
+
+ ```shell
+ /path/to/datasets
+ └── RADM_dataset.tar.gz
+ ```
+
+ ```python
+ import datasets as ds
+
+ dataset = ds.load_dataset(
+     path="shunk031/CGL-Dataset-v2",
+     data_dir="/path/to/datasets/RADM_dataset.tar.gz",
+     decode_rle=True,  # True if Run-length Encoding (RLE) is to be decoded and converted to a binary mask.
+     include_text_features=True,  # True if the RoBERTa-based text features are to be loaded.
+ )
+ ```
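+
+ As a rough sketch of how a loaded example can be inspected (the field layout follows `_info` in the loading script; note that a `ds.Sequence` of dictionaries is exposed as a dictionary of lists):
+
+ ```python
+ example = dataset["train"][0]
+
+ # Basic metadata and the decoded PIL image.
+ print(example["image_id"], example["file_name"], example["width"], example["height"])
+
+ # COCO-style layout annotations; `segmentation` is a mask image when
+ # `decode_rle=True`, otherwise a compressed RLE dict.
+ annotations = example["annotations"]
+ for bbox, name in zip(annotations["bbox"], annotations["category"]["name"]):
+     print(bbox, name)
+
+ # Text annotations and, when `include_text_features=True`, the text features.
+ print(example["text_annotation"])
+ if example["text_feature"] is not None:
+     print(example["text_feature"]["num"])
+ ```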
+
+ ### Data Fields
+
+ [More Information Needed]
+
+ <!-- List and describe the fields present in the dataset. Mention their data type, and whether they are used as input or output in any of the tasks the dataset currently supports. If the data has span indices, describe their attributes, such as whether they are at the character level or word level, whether they are contiguous or not, etc. If the dataset contains example IDs, state whether they have an inherent meaning, such as a mapping to other datasets or pointing to relationships between data points.
+
+ - `example_field`: description of `example_field`
+
+ Note that the descriptions can be initialized with the **Show Markdown Data Fields** output of the [Datasets Tagging app](https://huggingface.co/spaces/huggingface/datasets-tagging); you will then only need to refine the generated descriptions. -->
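+
+ Pending full descriptions, the feature schema declared in the loading script `CGL-Dataset-v2.py` (`_info`) defines the following top-level fields:
+
+ - `image_id`, `file_name`, `width`, `height`, `image`: the poster image and its metadata.
+ - `annotations`: a sequence of COCO-style layout elements, each with `annotation_id`, `area`, `bbox`, `category` (id, name, supercategory), `category_id`, `image_id`, `iscrowd`, and `segmentation` (a mask image when `decode_rle=True`, otherwise compressed RLE).
+ - `text_annotation`: text annotations with `is_sample`, `image`, `rotate`, `pin`, and `data` (points, category descriptions, and user-selected values), plus the test-split fields `product_detail_highlighted_word`, `blc_text`, and `adv_sellpoint`.
+ - `text_feature`: the optional RoBERTa-based text features (`num`, `pos`, `feats`), populated when `include_text_features=True`.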
+
+ ### Data Splits
+
+ [More Information Needed]
+
+ <!-- Describe and name the splits in the dataset if there are more than one.
+
+ Describe any criteria for splitting the data, if used. If there are differences between the splits (e.g. if the training annotations are machine-generated and the dev and test ones are created by humans, or if different numbers of annotators contributed to each example), describe them here.
+
+ Provide the sizes of each split. As appropriate, provide any descriptive statistics for the features, such as average length. For example:
+
+ | | train | validation | test |
+ |-------------------------|------:|-----------:|-----:|
+ | Input Sentences | | | |
+ | Average Sentence Length | | | | -->
+
+ ## Dataset Creation
+
+ ### Curation Rationale
+
+ [More Information Needed]
+
+ <!-- What need motivated the creation of this dataset? What are some of the reasons underlying the major choices involved in putting it together? -->
+
+ ### Source Data
+
+ [More Information Needed]
+
+ <!-- This section describes the source data (e.g. news text and headlines, social media posts, translated sentences,...) -->
+
+ #### Initial Data Collection and Normalization
+
+ [More Information Needed]
+
+ <!-- Describe the data collection process. Describe any criteria for data selection or filtering. List any key words or search terms used. If possible, include runtime information for the collection process.
+
+ If data was collected from other pre-existing datasets, link to source here and to their [Hugging Face version](https://huggingface.co/datasets/dataset_name).
+
+ If the data was modified or normalized after being collected (e.g. if the data is word-tokenized), describe the process and the tools used. -->
+
+ #### Who are the source language producers?
+
+ [More Information Needed]
+
+ <!-- State whether the data was produced by humans or machine generated. Describe the people or systems who originally created the data.
+
+ If available, include self-reported demographic or identity information for the source data creators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender.
+
+ Describe the conditions under which the data was created (for example, if the producers were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here.
+
+ Describe other people represented or mentioned in the data. Where possible, link to references for the information. -->
+
+ ### Annotations
+
+ [More Information Needed]
+
+ <!-- If the dataset contains annotations which are not part of the initial data collection, describe them in the following paragraphs. -->
+
+ #### Annotation process
+
+ [More Information Needed]
+
+ <!-- If applicable, describe the annotation process and any tools used, or state otherwise. Describe the amount of data annotated, if not all. Describe or reference annotation guidelines provided to the annotators. If available, provide interannotator statistics. Describe any annotation validation processes. -->
+
+ #### Who are the annotators?
+
+ [More Information Needed]
+
+ <!-- If annotations were collected for the source data (such as class labels or syntactic parses), state whether the annotations were produced by humans or machine generated.
+
+ Describe the people or systems who originally created the annotations and their selection criteria if applicable.
+
+ If available, include self-reported demographic or identity information for the annotators, but avoid inferring this information. Instead state that this information is unknown. See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender.
+
+ Describe the conditions under which the data was annotated (for example, if the annotators were crowdworkers, state what platform was used, or if the data was found, what website the data was found on). If compensation was provided, include that information here. -->
+
+ ### Personal and Sensitive Information
+
+ [More Information Needed]
+
+ <!-- State whether the dataset uses identity categories and, if so, how the information is used. Describe where this information comes from (i.e. self-reporting, collecting from profiles, inferring, etc.). See [Larson 2017](https://www.aclweb.org/anthology/W17-1601.pdf) for using identity categories as variables, particularly gender. State whether the data is linked to individuals and whether those individuals can be identified in the dataset, either directly or indirectly (i.e., in combination with other data).
+
+ State whether the dataset contains other data that might be considered sensitive (e.g., data that reveals racial or ethnic origins, sexual orientations, religious beliefs, political opinions or union memberships, or locations; financial or health data; biometric or genetic data; forms of government identification, such as social security numbers; criminal history).
+
+ If efforts were made to anonymize the data, describe the anonymization process. -->
+
+ ## Considerations for Using the Data
+
+ ### Social Impact of Dataset
+
+ [More Information Needed]
+
+ <!-- Please discuss some of the ways you believe the use of this dataset will impact society.
+
+ The statement should include both positive outlooks, such as outlining how technologies developed through its use may improve people's lives, and discuss the accompanying risks. These risks may range from making important decisions more opaque to people who are affected by the technology, to reinforcing existing harmful biases (whose specifics should be discussed in the next section), among other considerations.
+
+ Also describe in this section if the proposed dataset contains a low-resource or under-represented language. If this is the case or if this task has any impact on underserved communities, please elaborate here. -->
+
+ ### Discussion of Biases
+
+ [More Information Needed]
+
+ <!-- Provide descriptions of specific biases that are likely to be reflected in the data, and state whether any steps were taken to reduce their impact.
+
+ For Wikipedia text, see for example [Dinan et al 2020 on biases in Wikipedia (esp. Table 1)](https://arxiv.org/abs/2005.00614), or [Blodgett et al 2020](https://www.aclweb.org/anthology/2020.acl-main.485/) for a more general discussion of the topic.
+
+ If analyses have been run quantifying these biases, please add brief summaries and links to the studies here. -->
+
+ ### Other Known Limitations
+
+ [More Information Needed]
+
+ <!-- If studies of the datasets have outlined other limitations of the dataset, such as annotation artifacts, please outline and cite them here. -->
+
+ ## Additional Information
+
+ ### Dataset Curators
+
+ [More Information Needed]
+
+ <!-- List the people involved in collecting the dataset and their affiliation(s). If funding information is known, include it here. -->
+
+ ### Licensing Information
+
+ [More Information Needed]
+
+ <!-- Provide the license and link to the license webpage if available. -->
+
+ ### Citation Information
+
+ <!-- Provide the [BibTex](http://www.bibtex.org/)-formatted reference for the dataset. For example:
+ ```
+ @article{article_id,
+   author = {Author List},
+   title = {Dataset Paper Title},
+   journal = {Publication Venue},
+   year = {2525}
+ }
+ ```
+
+ If the dataset has a [DOI](https://www.doi.org/), please provide it here. -->
+
+ ```bibtex
+ @inproceedings{li2023relation,
+   title={Relation-Aware Diffusion Model for Controllable Poster Layout Generation},
+   author={Li, Fengheng and Liu, An and Feng, Wei and Zhu, Honghe and Li, Yaoyu and Zhang, Zheng and Lv, Jingjing and Zhu, Xin and Shen, Junjie and Lin, Zhangang},
+   booktitle={Proceedings of the 32nd ACM international conference on information & knowledge management},
+   pages={1249--1258},
+   year={2023}
+ }
+ ```
+
+ ### Contributions
+
+ Thanks to [@liuan0803](https://github.com/liuan0803) for creating this dataset.
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,26 @@
+ [tool.poetry]
+ name = "huggingface-datasets-cgl-dataset-v2"
+ version = "0.1.0"
+ description = ""
+ authors = ["Shunsuke KITADA <shunsuke.kitada.0831@gmail.com>"]
+ readme = "README.md"
+
+ [tool.poetry.dependencies]
+ python = "^3.9"
+ datasets = { extras = ["vision"], version = "^2.16.1" }
+ pycocotools = "^2.0.7"
+ torch = ">=1.0.0"
+
+ [tool.poetry.group.dev.dependencies]
+ ruff = "^0.1.11"
+ black = "^23.12.1"
+ isort = "^5.13.2"
+ mypy = "^1.8.0"
+ pytest = "^7.4.4"
+ types-pillow = "^10.1.0.20240106"
+ types-pycocotools = "^2.0.0.20240106"
+ types-tqdm = "^4.66.0.20240106"
+
+ [build-system]
+ requires = ["poetry-core"]
+ build-backend = "poetry.core.masonry.api"
tests/CGL-Dataset-v2_test.py ADDED
@@ -0,0 +1,53 @@
+ import os
+
+ import datasets as ds
+ import pytest
+
+
+ @pytest.fixture
+ def dataset_path() -> str:
+     return "CGL-Dataset-v2.py"
+
+
+ @pytest.fixture
+ def data_dir() -> str:
+     return "RADM_dataset.tar.gz"
+
+
+ @pytest.mark.skipif(
+     condition=bool(os.environ.get("CI", False)),
+     reason=(
+         "Because this loading script downloads a large dataset, "
+         "we will skip running it on CI."
+     ),
+ )
+ @pytest.mark.parametrize(
+     argnames="decode_rle",
+     argvalues=(
+         True,
+         False,
+     ),
+ )
+ @pytest.mark.parametrize(
+     argnames="include_text_features",
+     argvalues=(
+         True,
+         False,
+     ),
+ )
+ def test_load_dataset(
+     dataset_path: str,
+     data_dir: str,
+     include_text_features: bool,
+     decode_rle: bool,
+     expected_num_train: int = 60548,
+     expected_num_test: int = 1035,
+ ):
+     dataset = ds.load_dataset(
+         path=dataset_path,
+         data_dir=data_dir,
+         decode_rle=decode_rle,
+         include_text_features=include_text_features,
+     )
+     assert dataset["train"].num_rows == expected_num_train
+     assert dataset["test"].num_rows == expected_num_test
tests/__init__.py ADDED
(empty file)