Tasks: Image Classification
Size: 1K - 10K
import os

import datasets
from datasets.tasks import ImageClassification

_HOMEPAGE = "https://universe.roboflow.com/robert-demo-qvail/pokedex/dataset/14"
_LICENSE = "Public Domain"
_CITATION = """\
@misc{ pokedex_dataset,
    title = { Pokedex Dataset },
    type = { Open Source Dataset },
    author = { Lance Zhang },
    howpublished = { \\url{ https://universe.roboflow.com/robert-demo-qvail/pokedex } },
    url = { https://universe.roboflow.com/robert-demo-qvail/pokedex },
    journal = { Roboflow Universe },
    publisher = { Roboflow },
    year = { 2022 },
    month = { dec },
    note = { visited on 2023-01-14 },
}
"""
_CATEGORIES = ['Golbat', 'Machoke', 'Omastar', 'Diglett', 'Lapras', 'Kabuto', 'Persian', 'Weepinbell', 'Golem', 'Dodrio', 'Raichu', 'Zapdos', 'Raticate', 'Magnemite', 'Ivysaur', 'Growlithe', 'Tangela', 'Drowzee', 'Rapidash', 'Venonat', 'Pidgeot', 'Nidorino', 'Porygon', 'Lickitung', 'Rattata', 'Machop', 'Charmeleon', 'Slowbro', 'Parasect', 'Eevee', 'Starmie', 'Staryu', 'Psyduck', 'Dragonair', 'Magikarp', 'Vileplume', 'Marowak', 'Pidgeotto', 'Shellder', 'Mewtwo', 'Farfetchd', 'Kingler', 'Seel', 'Kakuna', 'Doduo', 'Electabuzz', 'Charmander', 'Rhyhorn', 'Tauros', 'Dugtrio', 'Poliwrath', 'Gengar', 'Exeggutor', 'Dewgong', 'Jigglypuff', 'Geodude', 'Kadabra', 'Nidorina', 'Sandshrew', 'Grimer', 'MrMime', 'Pidgey', 'Koffing', 'Ekans', 'Alolan Sandslash', 'Venusaur', 'Snorlax', 'Paras', 'Jynx', 'Chansey', 'Hitmonchan', 'Gastly', 'Kangaskhan', 'Oddish', 'Wigglytuff', 'Graveler', 'Arcanine', 'Clefairy', 'Articuno', 'Poliwag', 'Abra', 'Squirtle', 'Voltorb', 'Ponyta', 'Moltres', 'Nidoqueen', 'Magmar', 'Onix', 'Vulpix', 'Butterfree', 'Krabby', 'Arbok', 'Clefable', 'Goldeen', 'Magneton', 'Dratini', 'Caterpie', 'Jolteon', 'Nidoking', 'Alakazam', 'Dragonite', 'Fearow', 'Slowpoke', 'Weezing', 'Beedrill', 'Weedle', 'Cloyster', 'Vaporeon', 'Gyarados', 'Golduck', 'Machamp', 'Hitmonlee', 'Primeape', 'Cubone', 'Sandslash', 'Scyther', 'Haunter', 'Metapod', 'Tentacruel', 'Aerodactyl', 'Kabutops', 'Ninetales', 'Zubat', 'Rhydon', 'Mew', 'Pinsir', 'Ditto', 'Victreebel', 'Omanyte', 'Horsea', 'Pikachu', 'Blastoise', 'Venomoth', 'Charizard', 'Seadra', 'Muk', 'Spearow', 'Bulbasaur', 'Bellsprout', 'Electrode', 'Gloom', 'Poliwhirl', 'Flareon', 'Seaking', 'Hypno', 'Wartortle', 'Mankey', 'Tentacool', 'Exeggcute', 'Meowth']
class POKEMONCLASSIFICATIONConfig(datasets.BuilderConfig):
    """Builder Config for pokemon-classification"""

    def __init__(self, data_urls, **kwargs):
        """
        BuilderConfig for pokemon-classification.

        Args:
            data_urls: `dict`, name to url to download the zip file from.
            **kwargs: keyword arguments forwarded to super.
        """
        super(POKEMONCLASSIFICATIONConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
        self.data_urls = data_urls
class POKEMONCLASSIFICATION(datasets.GeneratorBasedBuilder):
    """pokemon-classification image classification dataset"""

    VERSION = datasets.Version("1.0.0")
    BUILDER_CONFIGS = [
        POKEMONCLASSIFICATIONConfig(
            name="full",
            description="Full version of pokemon-classification dataset.",
            data_urls={
                "train": "https://huggingface.co/datasets/fcakyon/pokemon-classification/resolve/main/data/train.zip",
                "validation": "https://huggingface.co/datasets/fcakyon/pokemon-classification/resolve/main/data/valid.zip",
                "test": "https://huggingface.co/datasets/fcakyon/pokemon-classification/resolve/main/data/test.zip",
            },
        ),
        POKEMONCLASSIFICATIONConfig(
            name="mini",
            description="Mini version of pokemon-classification dataset.",
            # all three splits of the "mini" config reuse the same small archive
            data_urls={
                "train": "https://huggingface.co/datasets/fcakyon/pokemon-classification/resolve/main/data/valid-mini.zip",
                "validation": "https://huggingface.co/datasets/fcakyon/pokemon-classification/resolve/main/data/valid-mini.zip",
                "test": "https://huggingface.co/datasets/fcakyon/pokemon-classification/resolve/main/data/valid-mini.zip",
            },
        ),
    ]
    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features(
                {
                    "image_file_path": datasets.Value("string"),
                    "image": datasets.Image(),
                    "labels": datasets.features.ClassLabel(names=_CATEGORIES),
                }
            ),
            supervised_keys=("image", "labels"),
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE,
            task_templates=[ImageClassification(image_column="image", label_column="labels")],
        )
    def _split_generators(self, dl_manager):
        data_files = dl_manager.download_and_extract(self.config.data_urls)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["train"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["validation"]]),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "files": dl_manager.iter_files([data_files["test"]]),
                },
            ),
        ]
    def _generate_examples(self, files):
        for i, path in enumerate(files):
            file_name = os.path.basename(path)
            # only yield image files; the parent directory name is the class label
            if file_name.endswith((".jpg", ".png", ".jpeg", ".bmp", ".tif", ".tiff")):
                yield i, {
                    "image_file_path": path,
                    "image": path,
                    "labels": os.path.basename(os.path.dirname(path)),
                }
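
For reference, a minimal usage sketch. It assumes the script is published under the `fcakyon/pokemon-classification` repository on the Hugging Face Hub, as the download URLs above suggest; recent versions of the `datasets` library may additionally require `trust_remote_code=True` when loading script-based datasets.

from datasets import load_dataset

# "mini" downloads a single small archive for all splits; use name="full" for the complete data
dataset = load_dataset("fcakyon/pokemon-classification", name="mini")

example = dataset["train"][0]
label_name = dataset["train"].features["labels"].int2str(example["labels"])
print(label_name, example["image_file_path"])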