Diwank Singh Tomer committed
Commit: 6117d42
Parent(s): 1d55733

feat: Add dataset script

Signed-off-by: Diwank Singh Tomer <diwank.singh@gmail.com>

Files changed (7):
  1. dataset_infos.json +5 -3
  2. download.sh +3 -0
  3. lld/loader.py +4 -2
  4. lld/preprocess.py +19 -8
  5. lld_logos.py +86 -0
  6. poetry.lock +54 -2
  7. pyproject.toml +4 -2
dataset_infos.json CHANGED
@@ -1,6 +1,8 @@
- {"lld": {
+ {
+ "lld": {
  "description": "Designing a logo for a new brand is a lengthy and tedious back-and-forth process between a designer and a client. In this paper we explore to what extent machine learning can solve the creative task of the designer. For this, we build a dataset -- LLD -- of 600k+ logos crawled from the world wide web. Training Generative Adversarial Networks (GANs) for logo synthesis on such multi-modal data is not straightforward and results in mode collapse for some state-of-the-art methods. We propose the use of synthetic labels obtained through clustering to disentangle and stabilize GAN training. We are able to generate a high diversity of plausible logos and we demonstrate latent space exploration techniques to ease the logo design task in an interactive manner. Moreover, we validate the proposed clustered GAN training on CIFAR 10, achieving state-of-the-art Inception scores when using synthetic labels obtained via clustering the features of an ImageNet classifier. GANs can cope with multi-modal data by means of synthetic labels achieved through clustering, and our results show the creative potential of such techniques for logo synthesis and manipulation.",
- "citation": "@misc{sage2017logodataset, author={Sage, Alexander and Agustsson, Eirikur and Timofte, Radu and Van Gool, Luc}, title = {LLD - Large Logo Dataset - version 0.1}, year = {2017}, howpublished = "\url{https://data.vision.ee.ethz.ch/cvl/lld}"}",
+ "citation": "@misc{sage2017logodataset, author={Sage, Alexander and Agustsson, Eirikur and Timofte, Radu and Van Gool, Luc}, title = {LLD - Large Logo Dataset - version 0.1}, year = {2017}, howpublished = url{https://data.vision.ee.ethz.ch/cvl/lld}}",
  "homepage": "https://data.vision.ee.ethz.ch/sagea/lld/",
  "config_name": "labeled"
- }}
+ }
+ }
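
The point of this change is JSON validity: the old file nested unescaped double quotes inside the "citation" value (around \url{...}), which no JSON parser accepts. A quick sanity check for the fixed file, assuming it is run from the repository root:

import json

# Raises json.JSONDecodeError on the old file; loads cleanly on the new one.
with open("dataset_infos.json") as f:
    infos = json.load(f)

print(infos["lld"]["config_name"])  # -> labeled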
download.sh ADDED
@@ -0,0 +1,3 @@
+ #!/usr/bin/env bash
+
+ wget -O raw/LLD-logo.hdf5 https://data.vision.ee.ethz.ch/sagea/lld/data/LLD-logo.hdf5
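
Note that `wget -O raw/LLD-logo.hdf5` writes into `raw/` but does not create it, so a first run from the repository root would look something like this (the `mkdir` step is an assumption, not part of the commit):

mkdir -p raw       # assumed prerequisite: wget -O does not create the directory
bash download.sh   # fetches the LLD-logo.hdf5 archive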
lld/loader.py CHANGED
@@ -7,13 +7,15 @@ import numpy as np
  import PIL.Image as Image

  script_dir = os.path.dirname(__file__)
- datafile_path = os.path.join(script_dir, "../data/LLD-logo.hdf5")
+ datafile_path = os.path.join(script_dir, "../raw/LLD-logo.hdf5")

  with h5py.File(datafile_path, "r") as throwaway:
      samples_count: int = len(throwaway["data"])


- def gen_samples(labels: list[str] = ["data", "meta_data/names"]):
+ def gen_samples(
+     labels: list[str] = ["data", "meta_data/names"], datafile_path: str = datafile_path
+ ):

      # open hdf5 file
      with h5py.File(datafile_path, "r") as hdf5_file:
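
The new keyword argument lets callers point `gen_samples` at any copy of the archive instead of the module-level default. A minimal sketch of the intended call; what the generator yields beyond the lines shown in this hunk is an assumption based on the default `labels`:

from lld.loader import gen_samples, samples_count

print(samples_count)  # logo count read from the "data" group at import time

# Assumed: one item per sample, drawn from "data" and "meta_data/names".
for sample in gen_samples(datafile_path="raw/LLD-logo.hdf5"):
    break  # inspect just the first sample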
lld/preprocess.py CHANGED
@@ -9,18 +9,20 @@ import numpy as np
  import pandas as pd
  from tqdm.asyncio import trange

- from .loader import gen_samples, samples_count
+ from .loader import datafile_path, gen_samples, samples_count
  from .crawler import run

  script_dir = os.path.dirname(__file__)
  outfile_path = os.path.join(script_dir, "../data/lld-processed.h5")


- async def gen_processor(batch_size: int, limit: int):
+ async def gen_processor(
+     batch_size: int, limit: int, datafile_path: str = datafile_path
+ ):
      count = min(limit, samples_count)
      batch_size = min(limit, batch_size)

-     samples = gen_samples()
+     samples = gen_samples(datafile_path=datafile_path)
      steps = count // batch_size

      for step in trange(steps):
@@ -42,24 +44,33 @@ async def gen_processor(batch_size: int, limit: int):
          yield data


- async def preprocess(batch_size: int = 100, limit: int = samples_count + 1):
+ async def preprocess(
+     batch_size: int = 100,
+     limit: int = samples_count + 1,
+     datafile_path: str = datafile_path,
+ ):

      columns = ["images", "description", "name"]

-     processor = gen_processor(batch_size, limit)
+     processor = gen_processor(batch_size, limit, datafile_path=datafile_path)

      chunk_size = 1000
      async with stream.chunks(processor, chunk_size).stream() as chunks:
          async for chunk in chunks:
              df_chunk = pd.DataFrame(chunk, columns=columns)
-             df_chunk.to_hdf(
-                 outfile_path, "data", data_columns=columns, mode="a"
-             )
+             df_chunk.to_hdf(outfile_path, "data", data_columns=columns, mode="a")


  if __name__ == "__main__":
      parser = argparse.ArgumentParser()

+     parser.add_argument(
+         "--datafile_path",
+         help="Path to downloaded archive",
+         type=str,
+         default=datafile_path,
+     )
+
      parser.add_argument(
          "--limit",
          help="Limit to total records processed",
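
Assuming the remaining argparse lines (not shown in this hunk) forward the parsed flags into `preprocess`, a capped run against the freshly downloaded archive would be invoked roughly as:

python -m lld.preprocess --datafile_path raw/LLD-logo.hdf5 --limit 1000

with the processed chunks appended to data/lld-processed.h5.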
lld_logos.py ADDED
@@ -0,0 +1,84 @@
+ """Dataset class for LLD dataset."""
+
+ import os
+
+ import datasets as ds
+ import pandas as pd
+ from sklearn.model_selection import train_test_split
+
+
+ _HOMEPAGE = "https://huggingface.co/datasets/diwank/lld"
+ _LICENSE = "MIT"
+
+ _DESCRIPTION = """
+ Designing a logo for a new brand is a lengthy and tedious back-and-forth process between a designer and a client. In this paper we explore to what extent machine learning can solve the creative task of the designer. For this, we build a dataset -- LLD -- of 600k+ logos crawled from the world wide web. Training Generative Adversarial Networks (GANs) for logo synthesis on such multi-modal data is not straightforward and results in mode collapse for some state-of-the-art methods. We propose the use of synthetic labels obtained through clustering to disentangle and stabilize GAN training. We are able to generate a high diversity of plausible logos and we demonstrate latent space exploration techniques to ease the logo design task in an interactive manner. Moreover, we validate the proposed clustered GAN training on CIFAR 10, achieving state-of-the-art Inception scores when using synthetic labels obtained via clustering the features of an ImageNet classifier. GANs can cope with multi-modal data by means of synthetic labels achieved through clustering, and our results show the creative potential of such techniques for logo synthesis and manipulation.
+ """
+
+ _CITATION = """
+ @misc{sage2017logodataset,
+     author={Sage, Alexander and Agustsson, Eirikur and Timofte, Radu and Van Gool, Luc},
+     title = {LLD - Large Logo Dataset - version 0.1},
+     year = {2017},
+ }
+ """
+
+ _URL = "https://huggingface.co/datasets/diwank/lld/resolve/main/data/lld-processed.h5"
+
+
+ class LLD(ds.GeneratorBasedBuilder):
+     """LLD Images dataset."""
+
+     def _info(self):
+         return ds.DatasetInfo(
+             description=_DESCRIPTION,
+             features=ds.Features(
+                 {
+                     "image": ds.Sequence(feature=ds.Image()),
+                     "description": ds.Value("string"),
+                 }
+             ),
+             supervised_keys=("image", "description"),
+             homepage=_HOMEPAGE,
+             citation=_CITATION,
+             license=_LICENSE,
+         )
+
+     def _split_generators(self, dl_manager):
+         # Load dataframe
+         use_local = os.environ.get("USE_LOCAL")
+         archive_path = (
+             "./data/lld-processed.h5" if use_local else dl_manager.download(_URL)
+         )
+         df = pd.read_hdf(archive_path)
+
+         X = df.pop("description")
+         y = df.pop("images")
+         X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
+
+         return [
+             ds.SplitGenerator(
+                 name=ds.Split.TRAIN,
+                 gen_kwargs={
+                     "description": X_train,
+                     "images": y_train,
+                 },
+             ),
+             ds.SplitGenerator(
+                 name=ds.Split.TEST,
+                 gen_kwargs={
+                     "description": X_test,
+                     "images": y_test,
+                 },
+             ),
+         ]
+
+     def _generate_examples(self, description, images):
+         """Generate images and description splits."""
+
+         for i, (desc, imgs) in enumerate(zip(description.values, images.values)):
+             for j, img in enumerate(imgs):
+                 # Keys must be unique; i alone would repeat once per image.
+                 yield f"{i}_{j}", {
+                     "image": {"bytes": img},
+                     "description": desc,
+                 }
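
With the script published, consumers should be able to load the dataset in the usual way; setting `USE_LOCAL` short-circuits the download and reads `./data/lld-processed.h5` instead. A sketch of both paths (the repo id comes from `_HOMEPAGE`):

import os

import datasets as ds

# os.environ["USE_LOCAL"] = "1"  # uncomment to read ./data/lld-processed.h5 locally
dataset = ds.load_dataset("diwank/lld")  # otherwise downloads _URL via dl_manager
print(dataset["train"][0]["description"])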
poetry.lock CHANGED
@@ -264,6 +264,7 @@ multiprocess = "*"
  numpy = ">=1.17"
  packaging = "*"
  pandas = "*"
+ Pillow = {version = ">=6.2.1", optional = true, markers = "extra == \"vision\""}
  pyarrow = ">=6.0.0"
  requests = ">=2.19.0"
  responses = "<0.19"
@@ -535,6 +536,14 @@ MarkupSafe = ">=2.0"
  [package.extras]
  i18n = ["Babel (>=2.7)"]

+ [[package]]
+ name = "joblib"
+ version = "1.1.0"
+ description = "Lightweight pipelining with Python functions"
+ category = "main"
+ optional = false
+ python-versions = ">=3.6"
+
  [[package]]
  name = "locket"
  version = "1.0.0"
@@ -900,6 +909,37 @@ urllib3 = ">=1.25.10"
  [package.extras]
  tests = ["pytest (>=4.6)", "coverage (>=6.0.0)", "pytest-cov", "pytest-localserver", "flake8", "types-mock", "types-requests", "mypy"]

+ [[package]]
+ name = "scikit-learn"
+ version = "1.1.1"
+ description = "A set of python modules for machine learning and data mining"
+ category = "main"
+ optional = false
+ python-versions = ">=3.8"
+
+ [package.dependencies]
+ joblib = ">=1.0.0"
+ numpy = ">=1.17.3"
+ scipy = ">=1.3.2"
+ threadpoolctl = ">=2.0.0"
+
+ [package.extras]
+ benchmark = ["matplotlib (>=3.1.2)", "pandas (>=1.0.5)", "memory-profiler (>=0.57.0)"]
+ docs = ["matplotlib (>=3.1.2)", "scikit-image (>=0.14.5)", "pandas (>=1.0.5)", "seaborn (>=0.9.0)", "memory-profiler (>=0.57.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "numpydoc (>=1.2.0)", "Pillow (>=7.1.2)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"]
+ examples = ["matplotlib (>=3.1.2)", "scikit-image (>=0.14.5)", "pandas (>=1.0.5)", "seaborn (>=0.9.0)"]
+ tests = ["matplotlib (>=3.1.2)", "scikit-image (>=0.14.5)", "pandas (>=1.0.5)", "pytest (>=5.0.1)", "pytest-cov (>=2.9.0)", "flake8 (>=3.8.2)", "black (>=22.3.0)", "mypy (>=0.770)", "pyamg (>=4.0.0)", "numpydoc (>=1.2.0)"]
+
+ [[package]]
+ name = "scipy"
+ version = "1.8.1"
+ description = "SciPy: Scientific Library for Python"
+ category = "main"
+ optional = false
+ python-versions = ">=3.8,<3.11"
+
+ [package.dependencies]
+ numpy = ">=1.17.3,<1.25.0"
+
  [[package]]
  name = "six"
  version = "1.16.0"
@@ -964,6 +1004,14 @@ category = "main"
  optional = false
  python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*"

+ [[package]]
+ name = "threadpoolctl"
+ version = "3.1.0"
+ description = "threadpoolctl"
+ category = "main"
+ optional = false
+ python-versions = ">=3.6"
+
  [[package]]
  name = "tomli"
  version = "2.0.1"
@@ -1086,8 +1134,8 @@ heapdict = "*"

  [metadata]
  lock-version = "1.1"
- python-versions = "^3.10"
- content-hash = "a2e4f349bb1aaa25793b817b43a1c0bb6c008857aa2cc30df73baf5e048dfa22"
+ python-versions = ">=3.10,<3.11"
+ content-hash = "ebfc659f0dac7f7b70f1533416f7e76ea3e1b56ce81bc072fe196357c22d82c5"

  [metadata.files]
  aiodns = []
@@ -1326,6 +1374,7 @@ jedi = [
      {file = "jedi-0.18.1.tar.gz", hash = "sha256:74137626a64a99c8eb6ae5832d99b3bdd7d29a3850fe2aa80a4126b2a7d949ab"},
  ]
  jinja2 = []
+ joblib = []
  locket = []
  markupsafe = []
  matplotlib-inline = [
@@ -1570,6 +1619,8 @@ responses = [
      {file = "responses-0.18.0-py3-none-any.whl", hash = "sha256:15c63ad16de13ee8e7182d99c9334f64fd81f1ee79f90748d527c28f7ca9dd51"},
      {file = "responses-0.18.0.tar.gz", hash = "sha256:380cad4c1c1dc942e5e8a8eaae0b4d4edf708f4f010db8b7bcfafad1fcd254ff"},
  ]
+ scikit-learn = []
+ scipy = []
  six = [
      {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
      {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
@@ -1585,6 +1636,7 @@ stack-data = [
  ]
  tables = []
  tblib = []
+ threadpoolctl = []
  tomli = []
  toolz = []
  tornado = []
pyproject.toml CHANGED
@@ -6,10 +6,10 @@ authors = ["Diwank Singh Tomer <diwank.singh@gmail.com>"]
  license = "MIT"

  [tool.poetry.dependencies]
- python = "^3.10"
+ python = ">=3.10,<3.11"
  numpy = "^1.23.1"
  h5py = "^3.7.0"
- datasets = "^2.3.2"
+ datasets = {extras = ["vision"], version = "^2.3.2"}
  Pillow = "^9.2.0"
  requests = "^2.28.1"
  bs4 = "^0.0.1"
@@ -22,6 +22,8 @@ tables = "^3.7.0"
  pyarrow = "^8.0.0"
  modin = {extras = ["dask"], version = "^0.15.2"}
  aiostream = "^0.4.4"
+ scipy = "^1.8.1"
+ scikit-learn = "^1.1.1"

  [tool.poetry.dev-dependencies]
  ipython = "^8.4.0"
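
The tighter `python = ">=3.10,<3.11"` bound follows from the new dependencies: the locked scipy 1.8.1 supports only `>=3.8,<3.11`, so the project's own range has to fit inside it. After editing pyproject.toml, the environment is refreshed with the standard Poetry flow:

poetry lock      # regenerates poetry.lock (hence the new content-hash above)
poetry install   # installs scipy, scikit-learn, joblib, threadpoolctl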