# ParquetPractice / try.py
import datasets
from huggingface_hub import HfApi
from datasets import DownloadManager, DatasetInfo
from datasets.data_files import DataFilesDict
import os
import json
# memo
# train-00000-of-00001.parquet
# Fill in the configuration here.
_NAME = "mickylan2367/spectrogram_musicCaps"
_EXTENSION = [".png"]
_REVISION = "main"
# _HOMEPAGE = "https://github.com/fastai/imagenette"
# Once this script's hosting location is decided, put its homepage URL here.
_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
_DESCRIPTION = f"""\
{_NAME}: datasets of spectrogram .png files built from the Google MusicCaps dataset.
Used for project learning...
"""
# Huh... what is this? (;´・ω・)
_IMAGES_DIR = "mickylan2367/images/data/"
# _REPO = "https://huggingface.co/datasets/frgfm/imagenette/resolve/main/metadata"
# Possibly useful reference URLs
# https://huggingface.co/docs/datasets/v1.1.1/_modules/datasets/utils/download_manager.html
# https://huggingface.co/datasets/animelover/danbooru2022/blob/main/danbooru2022.py
# https://huggingface.co/datasets/food101/blob/main/food101.py
# https://huggingface.co/docs/datasets/about_dataset_load
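
# Metadata format sketch (an assumption, inferred from _generate_examples below):
# each line of metadata*.jsonl is one standalone JSON object, e.g.
#   {"file_name": "00001.png", "caption": "A mellow piano piece with ..."}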

class spectrogram_musicCapsConfig(datasets.BuilderConfig):
    """Builder Config for spectrogram_musicCaps."""

    def __init__(self, metadata_urls, **kwargs):
        """BuilderConfig for spectrogram_musicCaps.

        Args:
            metadata_urls: dictionary with keys such as 'train' mapping each
                split to its metadata (.jsonl) URL.
            **kwargs: keyword arguments forwarded to super.
        """
        super(spectrogram_musicCapsConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        # self.data_url = data_url
        self.metadata_urls = metadata_urls

class spectrogram_musicCaps(datasets.GeneratorBasedBuilder):
    # Dataset subsets (configs) are defined here.
    BUILDER_CONFIGS = [
        spectrogram_musicCapsConfig(
            name="MusicCaps data 0_10",
            description="Datasets from MusicCaps by Mikan",
            # data_url="https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps/blob/main/data/data0_10.zip",
            # Note: the metadata URLs use resolve/ (raw file) rather than blob/
            # (HTML page) so that dl_manager downloads the actual .jsonl file.
            metadata_urls={
                "train": "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps/resolve/main/data/metadata0_10.jsonl"
            },
        ),
        spectrogram_musicCapsConfig(
            name="MusicCaps data 10_200",
            description="Second batch of datasets by Mikan",
            # data_url="https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps/blob/main/data/data10_200.zip",
            metadata_urls={
                "train": "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps/resolve/main/data/metadata10_200.jsonl"
            },
        ),
    ]
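
    # A config is selected by its `name` string at load time, e.g. (sketch):
    #   load_dataset("mickylan2367/spectrogram_musicCaps", "MusicCaps data 0_10")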

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "caption": datasets.Value("string"),
                }
            ),
            supervised_keys=("image", "caption"),
            homepage=_HOMEPAGE,
            # citation=_CITATION,
            # license=_LICENSE,
            # task_templates=[ImageClassification(image_column="image", label_column="label")],
        )
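
    # Note (an assumption about the library's semantics, not verified against
    # every datasets version): datasets.Image() decodes "image" to a PIL image
    # on access, and supervised_keys marks ("image", "caption") as the
    # (input, target) pair for supervised-style loaders.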

    # def _split_generators(self, dl_manager):
    #     archive_path = dl_manager.download(self.config.data_url)
    #     split_metadata_paths = dl_manager.download(self.config.metadata_urls)
    #     return [
    #         datasets.SplitGenerator(
    #             name=datasets.Split.TRAIN,
    #             gen_kwargs={
    #                 "images": dl_manager.iter_archive(archive_path),
    #                 "metadata_path": split_metadata_paths["train"],
    #             },
    #         )
    #     ]

    def _split_generators(self, dl_manager: DownloadManager):
        # Fetch the repository's file listing from the Hugging Face Hub.
        hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
        # archive_path = dl_manager.download(self.config.data_url)
        split_metadata_paths = dl_manager.download(self.config.metadata_urls)

        # Collect the repo's **.zip file URLs as a dict keyed by split.
        data_files = DataFilesDict.from_hf_repo(
            {datasets.Split.TRAIN: ["**"]},
            dataset_info=hfh_dataset_info,
            allowed_extensions=["zip", ".zip"],
        )

        gs = []
        for split, files in data_files.items():
            # Download and extract the zip files into local directories.
            downloaded_files = dl_manager.download_and_extract(files)
            # The reference code seems to pass the zip contents straight to
            # _generate_examples as "filepath"?
            gs.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "images": downloaded_files,
                        "metadata_path": split_metadata_paths["train"],
                    },
                )
            )
        return gs
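
    # Expected repo layout (an assumption, inferred from the URLs above):
    #   data/data0_10.zip         - spectrogram .png files
    #   data/metadata0_10.jsonl   - one JSON object per image
    #   data/data10_200.zip
    #   data/metadata10_200.jsonl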

    def _generate_examples(self, images, metadata_path):
        """Generate images and captions for splits."""
        # with open(metadata_path, encoding="utf-8") as f:
        #     files_to_keep = set(f.read().split("\n"))
        with open(metadata_path, encoding="utf-8") as fin:
            metadata = [json.loads(line) for line in fin]
        # `images` is the list of directories produced by download_and_extract;
        # resolve each metadata file_name against them (this assumes the
        # archives store images under exactly their metadata file_name paths).
        idx = 0
        for image_dir in images:
            for data in metadata:
                file_path = os.path.join(image_dir, data["file_name"])
                if os.path.exists(file_path):
                    yield idx, {
                        "image": file_path,
                        "caption": data["caption"],
                    }
                    idx += 1
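
# Minimal usage sketch (an assumption: this file is uploaded as the repo's
# loading script so that load_dataset resolves it; the config name must match
# one of the BUILDER_CONFIGS names above):
if __name__ == "__main__":
    from datasets import load_dataset

    ds = load_dataset(
        "mickylan2367/spectrogram_musicCaps",
        name="MusicCaps data 0_10",
        split="train",
    )
    print(ds[0]["caption"])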