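"""Graffiti dataset taken from https://www.graffiti.org/ and https://www.graffiti-database.com/."""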
import json

import datasets
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {Graffiti},
author={UR},
year={2023}
}
"""
_DESCRIPTION = """\
Graffiti dataset taken from https://www.graffiti.org/ and https://www.graffiti-database.com/.
"""

_HOMEPAGE = "https://huggingface.co/datasets/artificialhoney/graffiti"

_LICENSE = "Apache License 2.0"

_VERSION = "0.1.0"

_SOURCES = [
    "graffiti.org",
    "graffiti-database.com",
]
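# Each source directory under ./data/ is expected to contain images.tar.gz,
# conditioning.tar.gz and metadata.jsonl; they are wired together in
# _split_generators below.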


class GraffitiConfig(datasets.BuilderConfig):
    """BuilderConfig for Graffiti."""

    def __init__(self, **kwargs):
        """BuilderConfig for Graffiti.

        Args:
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)


class Graffiti(datasets.GeneratorBasedBuilder):
    """Graffiti dataset taken from https://www.graffiti.org/ and https://www.graffiti-database.com/."""

    BUILDER_CONFIG_CLASS = GraffitiConfig

    BUILDER_CONFIGS = [
        GraffitiConfig(
            name="default",
        ),
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "conditioning_image": datasets.Image(),
                    "text": datasets.Value("string"),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            version=_VERSION,
            task_templates=[],
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        images = []
        metadata = []
        conditioning = []
        for source in _SOURCES:
            # Download each source's archives and stream their members
            # instead of extracting them to disk.
            images.append(dl_manager.iter_archive(dl_manager.download("./data/{0}/images.tar.gz".format(source))))
            conditioning.append(dl_manager.iter_archive(dl_manager.download("./data/{0}/conditioning.tar.gz".format(source))))
            metadata.append(dl_manager.download("./data/{0}/metadata.jsonl".format(source)))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": images,
                    "metadata": metadata,
                    "conditioning": conditioning,
                },
            )
        ]

    def _generate_examples(self, metadata, images, conditioning):
        idx = 0
        for index, meta in enumerate(metadata):
            # Read this source's metadata rows (one JSON object per line).
            m = []
            with open(meta, encoding="utf-8") as f:
                for row in f:
                    m.append(json.loads(row))
            # The conditioning archive is assumed to list its files in the
            # same order as the image archive, so both are consumed in lockstep.
            c = iter(conditioning[index])
            for file_path, file_obj in images[index]:
                # Match the image to its metadata row by file name suffix.
                data = [x for x in m if file_path.endswith(x["file"])][0]

                conditioning_file_path, conditioning_file_obj = next(c)

                # Build the caption, enriched with palette, artist and city when present.
                text = data["caption"]
                if data["palette"] is not None:
                    colors = []
                    for color in data["palette"]:
                        # Skip duplicate color names and any shade of grey.
                        if color[2] in colors or "grey" in color[2]:
                            continue
                        colors.append(color[2])
                    if len(colors) > 0:
                        text += ", in the colors " + " and ".join(colors)
                if data["artist"] is not None:
                    text += ", by " + data["artist"]
                if data["city"] is not None:
                    text += ", located in " + data["city"]

                yield idx, {
                    "image": {"path": file_path, "bytes": file_obj.read()},
                    "conditioning_image": {"path": conditioning_file_path, "bytes": conditioning_file_obj.read()},
                    "text": text,
                }
                idx += 1
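

if __name__ == "__main__":
    # Minimal usage sketch, not part of the loader: assumes the dataset is
    # published under the Hub id from _HOMEPAGE and streams one example as a
    # quick smoke test.
    ds = datasets.load_dataset("artificialhoney/graffiti", split="train", streaming=True)
    print(next(iter(ds))["text"])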