# Copyright 2022 Fazzie
# MIT License
"""Loading script for Teyvat."""

from json import load
from os.path import join

from huggingface_hub import hf_hub_url

import datasets

_DESCRIPTION = """
Teyvat is the first small-scale text-to-image prompt dataset for Genshin Impact.
"""

_LICENSE = "CC0 1.0"
_VERSION = datasets.Version("0.0.1")

# Programmatically generate the URLs for the different parts of the dataset;
# hf_hub_url() provides a flexible way to resolve file URLs.
# See https://huggingface.co/datasets/Fazzie/Teyvat/

_URLS = {
    "data": hf_hub_url(
        repo_id="Fazzie/Teyvat",
        filename="data.zip",
        repo_type="dataset",
    ),
    "metadata": hf_hub_url(
        repo_id="Fazzie/Teyvat",
        filename="metadata.json",
        repo_type="dataset",
    ),
}
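
# For a dataset repo, hf_hub_url() resolves to a URL of the form
# https://huggingface.co/datasets/<repo_id>/resolve/<revision>/<filename>;
# e.g. the "data" entry above should resolve to
# https://huggingface.co/datasets/Fazzie/Teyvat/resolve/main/data.zip.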


class Teyvat(datasets.GeneratorBasedBuilder):
    """Teyvat is the first small-scale text-to-image prompt dataset for Genshin Impact."""

    def _info(self):
        """Specify the features, description, license, and version of Teyvat."""

        features = datasets.Features(
            {
                "image": datasets.Image(),
                "text": datasets.Value("string"),
            },
        )
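
        # Note: datasets.Image() accepts the {"path": ..., "bytes": ...}
        # dicts yielded by _generate_examples below and decodes them to PIL
        # images on access.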

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            license=_LICENSE,
            version=_VERSION
        )

    def _split_generators(self, dl_manager):
        # dl_manager is a datasets.download.DownloadManager that can be used
        # to download and extract URLs. It accepts any nested list/dict of
        # URLs and returns the same structure with each URL replaced by a
        # path to a local file. By default, archives are extracted and the
        # path to the cached extraction folder is returned instead of the
        # archive itself.
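        #
        # A minimal sketch of that behavior (hypothetical URLs, not part of
        # this dataset):
        #
        #     paths = dl_manager.download_and_extract({"a": url_a, "b": [url_b]})
        #     # -> {"a": "/cache/.../a", "b": ["/cache/.../b_extracted"]}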

        # Resolve the URLs.
        urls = _URLS

        # Download the image archive (extracted) and the metadata file.
        data_path = dl_manager.download_and_extract(urls["data"])
        meta_data_path = dl_manager.download(urls["metadata"])

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "data_path": data_path,
                    "meta_data_path": meta_data_path
                },
            ),
        ]

    def _generate_examples(self, data_path, meta_data_path):
        # This method takes the paths prepared in _split_generators and yields
        # (key, example) tuples. The `key` exists for legacy (tfds) reasons and
        # is not important in itself, but it must be unique for each example.

        # Load the JSON metadata describing every image; use a context manager
        # so the file handle is closed promptly.
        with open(meta_data_path, "r", encoding="utf8") as meta_file:
            data = load(meta_file)

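        # Each metadata record is assumed to look like
        # {"file_name": "<image file>", "text": "<prompt>"}; this is inferred
        # from the fields accessed below, not from a published schema.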
        for image in data:
            image_path = join(data_path, "data", image["file_name"])
            text = image["text"]

            # Read the image bytes eagerly so each example is self-contained.
            with open(image_path, "rb") as image_file:
                image_bytes = image_file.read()

            # The image path doubles as the unique example key.
            yield image_path, {
                "image": {
                    "path": image_path,
                    "bytes": image_bytes,
                },
                "text": text,
            }
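

# Example usage (a minimal sketch; assumes this script is hosted in the
# Fazzie/Teyvat dataset repo so `datasets` can resolve it by name; newer
# versions of `datasets` may also require trust_remote_code=True for
# script-based datasets):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("Fazzie/Teyvat", split="train")
#     print(ds[0]["text"])   # the prompt string
#     ds[0]["image"]         # a PIL image decoded from the stored bytes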