# Teyvat/Teyvat.py — Hugging Face Hub dataset script (repo: Fazzie/Teyvat, commit 64cfadc "fix path")
# Copyright 2022 Fazzie
# MIT License
"""Loading script for Teyvat."""
import re
import numpy as np
import pandas as pd
from json import load, dump
from os.path import join, basename
from huggingface_hub import hf_hub_url
import datasets
# Human-readable summary shown on the dataset hub page.
_DESCRIPTION = """
Teyvat is the first small-scale text-to-image prompt dataset for Genshin impact.
"""
_LICENSE = "CC0 1.0"
_VERSION = datasets.Version("0.0.1")
# Resolve download URLs through the Hub API rather than hard-coding them;
# hf_hub_url() keeps the links valid if the hosting layout changes.
# https://huggingface.co/datasets/Fazzie/Teyvat/
_URLS = {
    "data": hf_hub_url(
        repo_id="Fazzie/Teyvat",
        filename="data.zip",
        repo_type="dataset",
    ),
    "metadata": hf_hub_url(
        repo_id="Fazzie/Teyvat",
        filename="metadata.json",
        repo_type="dataset",
    ),
}
class Teyvat(datasets.GeneratorBasedBuilder):
    """Teyvat is the first small-scale text-to-image prompt dataset for Genshin impact."""

    def _info(self):
        """Return dataset metadata: each example is an image plus its prompt text."""
        features = datasets.Features(
            {
                "image": datasets.Image(),
                "text": datasets.Value("string"),
            },
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            license=_LICENSE,
            version=_VERSION,
        )

    def _split_generators(self, dl_manager):
        """Download the image archive and metadata, and declare a single TRAIN split.

        Args:
            dl_manager: ``datasets.DownloadManager`` that downloads (and, for
                archives, extracts) URLs, returning local cached paths.

        Returns:
            A one-element list with the TRAIN ``SplitGenerator``; its
            ``gen_kwargs`` are forwarded to ``_generate_examples``.
        """
        urls = _URLS
        # data.zip is downloaded AND extracted: data_path is the extraction dir.
        data_path = dl_manager.download_and_extract(urls["data"])
        # metadata.json is downloaded as-is (no extraction needed).
        meta_data_path = dl_manager.download(urls["metadata"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "data_path": data_path,
                    "meta_data_path": meta_data_path,
                },
            ),
        ]

    def _generate_examples(self, data_path, meta_data_path):
        """Yield ``(key, example)`` pairs, one per record in the metadata file.

        The key is the image path, which is unique per example (required for
        legacy tfds compatibility). Files are opened via context managers so
        handles are closed promptly instead of leaking.

        Args:
            data_path: directory containing the extracted ``data`` folder.
            meta_data_path: path to ``metadata.json`` — assumed to be a list of
                ``{"file_name": ..., "text": ...}`` records (matches the usage
                below; confirm against the hosted file).
        """
        with open(meta_data_path, "r", encoding="utf8") as meta_file:
            data = load(meta_file)
        for image in data:
            image_path = join(data_path, "data", image["file_name"])
            # Read raw bytes eagerly; the Image feature decodes them on access.
            with open(image_path, "rb") as image_file:
                image_bytes = image_file.read()
            yield image_path, {
                "image": {
                    "path": image_path,
                    "bytes": image_bytes,
                },
                "text": image["text"],
            }