# kanjis2radicals.py
import json

import datasets

_DESCRIPTION = """\
Contains Kanji images with corresponding radicals ids from WaniKani or https://api.robanohashi.org/docs/index.html
"""
_METADATA_URL = "https://huggingface.co/datasets/martingrzzler/kanjis2radicals/raw/main/kanji_metadata.jsonl"
_IMAGES_URL = "https://huggingface.co/datasets/martingrzzler/kanjis2radicals/resolve/main/kanji.tar.gz"
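
# The metadata file is a JSON Lines file with one object per kanji, matching the
# "radicals" feature declared in Kanji2Radicals._info below. The image archive is
# expected to hold one kanji image per metadata line, in the same order, since
# _generate_examples pairs them by index. An illustrative line (values are made
# up for documentation, not taken from the real file):
#
#   {"id": 440, "characters": "一", "meanings": "One",
#    "radicals": [{"characters": "一", "id": 1, "slug": "ground"}]}
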
class Kanji2Radicals(datasets.GeneratorBasedBuilder):
"""Kanji to radicals dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "kanji_image": datasets.Image(),
                    # Per-kanji metadata: the kanji's own id, characters and
                    # meanings, plus the list of its component radicals.
                    "radicals": {
                        "id": datasets.Value("int32"),
                        "characters": datasets.Value("string"),
                        "meanings": datasets.Value("string"),
                        "radicals": datasets.Sequence(
                            {
                                "characters": datasets.Value("string"),
                                "id": datasets.Value("int32"),
                                "slug": datasets.Value("string"),
                            }
                        ),
                    },
                }
            ),
            supervised_keys=None,
            homepage="https://robanohashi.org/",
        )

    def _split_generators(self, dl_manager):
        metadata_path = dl_manager.download_and_extract(_METADATA_URL)
        # Download the tar.gz without extracting it: `iter_archive` takes the
        # archive itself and lazily yields its members as (path, file object).
        images_path = dl_manager.download(_IMAGES_URL)
        images_iter = dl_manager.iter_archive(images_path)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "metadata_path": metadata_path,
                    "images_iter": images_iter,
                },
            ),
        ]

    def _generate_examples(self, metadata_path, images_iter):
        # One JSON object per line; the order must match the order of images in
        # the archive, because examples are paired by index below.
        radicals = []
        with open(metadata_path, encoding="utf-8") as f:
            for line in f:
                radicals.append(json.loads(line))

        for idx, (image_path, image) in enumerate(images_iter):
            # `iter_archive` yields (path-inside-archive, file-object) pairs for
            # regular files only, so no directory check is needed here. Testing
            # the archive-internal path with os.path.isdir would always return
            # False, because those paths do not exist on the local filesystem.
            yield image_path, {
                "radicals": radicals[idx],
                "kanji_image": image.read(),
            }
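

# Example usage (a sketch): load the published dataset through the standard
# `datasets` API. The repository id below is inferred from the URLs above and is
# an assumption; adjust it if the dataset lives elsewhere.
if __name__ == "__main__":
    ds = datasets.load_dataset("martingrzzler/kanjis2radicals", split="train")
    example = ds[0]
    # "radicals" holds the kanji metadata dict declared in _info.
    print(example["radicals"]["characters"], example["radicals"]["meanings"])
    # The Image() feature decodes "kanji_image" to a PIL image.
    example["kanji_image"].show()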