# LoadingScriptPractice/LoadingScriptPractice.py
import datasets
from huggingface_hub import HfApi
from datasets import DownloadManager, DatasetInfo
from datasets.data_files import DataFilesDict
import os
import json
from os.path import dirname, basename
from pathlib import Path
# Configuration goes here
_NAME = "mickylan2367/LoadingScriptPractice"
_EXTENSION = [".png"]
_REVISION = "main"
# _HOMEPAGE = "https://github.com/fastai/imagenette"
# Once the script's final hosting location is decided, set the homepage URL here
_HOMEPAGE = "https://huggingface.co/datasets/mickylan2367/spectrogram_musicCaps"
_DESCRIPTION = f"""\
{_NAME}: a dataset of spectrogram .png files derived from the Google MusicCaps dataset.
Used for project learning...
"""
# Get the URLs
# class LoadingScriptPracticeConfig(datasets.BuilderConfig):
# """BuilderConfig for Imagette."""
# def __init__(self, data_url, metadata_urls, **kwargs):
# """BuilderConfig for Imagette.
# Args:
# data_url: `string`, url to download the zip file from.
#             metadata_urls: dictionary with keys 'train' and 'validation' containing the archive metadata URLs
# **kwargs: keyword arguments forwarded to super.
# """
# super(LoadingScriptPracticeConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
# self.data_url = data_url
# self.metadata_urls = metadata_urls
class LoadingScriptPractice(datasets.GeneratorBasedBuilder):
    # Define the data subsets (configs) here
BUILDER_CONFIGS = [
datasets.BuilderConfig(
name="train",
description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
# data_url = train_data_url["train"][0],
# metadata_urls = {
# "train" : train_metadata_paths["train"][0]
# }
),
datasets.BuilderConfig(
name="test",
description="this Datasets is personal practice for using loadingScript. Data is from Google/MusicCaps",
# data_url = test_data_url["test"][0],
# metadata_urls={
# "test" : test_metadata_paths["test"][0]
# }
)
]
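    # NOTE: the config names above mirror the split names, but the actual
    # splits are discovered from the repository's folder layout in
    # _split_generators below.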
def _info(self) -> DatasetInfo:
return datasets.DatasetInfo(
            description=self.config.description,
features=datasets.Features(
{
"image": datasets.Image(),
"caption": datasets.Value("string"),
"data_idx": datasets.Value("int32"),
"number" : datasets.Value("int32"),
"label" : datasets.ClassLabel(
names=[
"blues",
"classical",
"country",
"disco",
"hiphop",
"metal",
"pop",
"reggae",
"rock",
"jazz"
]
)
}
),
supervised_keys=("image", "caption"),
homepage=_HOMEPAGE,
citation= "",
# license=_LICENSE,
# task_templates=[ImageClassification(image_column="image", label_column="label")],
)
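    # _split_generators lists the repository's .zip archives and .jsonl
    # metadata files via the Hub API, groups them by their parent folder
    # name, and emits one SplitGenerator per folder.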
def _split_generators(self, dl_manager: DownloadManager):
        # Fetch the repository's file listing from the Hugging Face Hub
hfh_dataset_info = HfApi().dataset_info(_NAME, revision=_REVISION, timeout=100.0)
metadata_urls = DataFilesDict.from_hf_repo(
{datasets.Split.TRAIN: ["**"]},
dataset_info=hfh_dataset_info,
allowed_extensions=["jsonl", ".jsonl"],
)
        # Resolve the **.zip archive URLs as a dict (DataFilesDict)
data_urls = DataFilesDict.from_hf_repo(
{datasets.Split.TRAIN: ["**"]},
dataset_info=hfh_dataset_info,
allowed_extensions=["zip", ".zip"],
)
        # Group the archive URLs by the name of their parent folder
        # (e.g. "train", "test"); the folder name becomes the split name.
        data_paths = dict()
        for path in data_urls["train"]:
            folder = basename(Path(dirname(path)))
            data_paths[folder] = path

        # Group the metadata (.jsonl) URLs the same way.
        metadata_paths = dict()
        for path in metadata_urls["train"]:
            folder = basename(Path(dirname(path)))
            metadata_paths[folder] = path
gs = []
for split, files in data_paths.items():
'''
split : "train" or "test" or "val"
files : zip files
'''
            # Download from the repository; dl_manager returns the cached local paths.
            metadata_path = dl_manager.download_and_extract(metadata_paths[split])
            downloaded_files_path = dl_manager.download(files)

            # In the original (upstream) code, the zip contents seem to be passed
            # straight to _generate_examples as "filepath"; here they are streamed
            # via iter_archive instead.
            gs.append(
                datasets.SplitGenerator(
                    name=split,
                    gen_kwargs={
                        "images": dl_manager.iter_archive(downloaded_files_path),
                        "metadata_path": metadata_path,
                    },
                )
            )
return gs
def _generate_examples(self, images, metadata_path):
"""Generate images and captions for splits."""
        # Read the .jsonl metadata and index it by file name, so every image
        # in the archive is matched to its own record even if the archive's
        # iteration order differs from the metadata order.
        metadata = dict()
        with open(metadata_path, encoding="utf-8") as fin:
            for line in fin:
                data = json.loads(line)
                metadata[data["file_name"]] = data

        for file_path, file_obj in images:
            # Assumes "file_name" in the metadata is the bare file name of
            # the image inside the zip archive.
            data = metadata[basename(file_path)]
            yield file_path, {
                "image": {
                    "path": file_path,
                    "bytes": file_obj.read(),
                },
                "caption": data["caption"],
                "data_idx": data["data_idx"],
                "number": data["number"],
                "label": data["label"],
            }
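
# Minimal usage sketch (assuming the repository holds the expected **.zip
# archives and matching .jsonl metadata; config and split names follow the
# BUILDER_CONFIGS defined above):
#
#     from datasets import load_dataset
#     ds = load_dataset("mickylan2367/LoadingScriptPractice", "train", split="train")
#     print(ds[0]["caption"], ds[0]["label"])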