import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {license_plates},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
Over 1.2 million annotated license plates from vehicles around the world.
This dataset is tailored for License Plate Recognition tasks and includes
images from both YouTube and PlatesMania.
Annotation details are provided in the About section below.
"""

_NAME = 'license_plates'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class LicensePlates(datasets.GeneratorBasedBuilder):
    """Builder for the TrainingDataPro license plates dataset."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="Brazil_youtube"),
        datasets.BuilderConfig(name="Estonia_platesmania"),
        datasets.BuilderConfig(name="Finland_platesmania"),
        datasets.BuilderConfig(name="Kazakhstan_platesmania"),
        datasets.BuilderConfig(name="Kazakhstan_youtube"),
        datasets.BuilderConfig(name="Lithuania_platesmania"),
        datasets.BuilderConfig(name="Serbia_platesmania"),
        datasets.BuilderConfig(name="Serbia_youtube"),
        datasets.BuilderConfig(name="UAE_platesmania"),
        datasets.BuilderConfig(name="UAE_youtube"),
    ]

    # Default config; must match one of the names in BUILDER_CONFIGS.
    DEFAULT_CONFIG_NAME = "Brazil_youtube"

    def _info(self):
        features = datasets.Features({
            'bbox_id': datasets.Value('uint32'),
            'bbox': datasets.Value('string'),
            'image': datasets.Image(),
            'labeled_image': datasets.Image(),
            'license_plate.id': datasets.Value('string'),
            'license_plate.visibility': datasets.Value('string'),
            'license_plate.rows_count': datasets.Value('uint8'),
            'license_plate.number': datasets.Value('string'),
            'license_plate.serial': datasets.Value('string'),
            'license_plate.country': datasets.Value('string'),
            'license_plate.mask': datasets.Value('string')
        })

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the image archive for the selected config and stream its
        # members, plus the CSV file holding the matching annotations.
        data = dl_manager.download(f"{_DATA}{self.config.name}.tar.gz")
        data = dl_manager.iter_archive(data)
        annotations = dl_manager.download(f'{_DATA}{self.config.name}.csv')
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "data": data,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, data, annotations):
        annotations_df = pd.read_csv(annotations, sep=',', index_col=0)
        images = {}

        # Read every archive member into memory, keyed by its file name, so the
        # annotation rows below can look images up by name.
        for file_path, file in data:
            file_name = file_path.split('/')[-1]
            images[file_name] = (file_path, file.read())

        annotations_df.drop(
            columns=['license_plate.region', 'license_plate.color'],
            inplace=True,
            errors='ignore')

        annotations_df.fillna(0, inplace=True)
        annotations_df.sort_values(by='file_name', inplace=True)

        # Enumerate the annotation rows so each yielded example has a unique key.
        for idx, row in enumerate(annotations_df.itertuples(index=True)):
            image = images[row[1]]
            name, ext = row[1].split('.')
            labeled_image = images[f'{name}_labeled.{ext}']

            yield idx, {
                'bbox_id': row[0],
                'bbox': row[2],
                "image": {
                    "path": image[0],
                    "bytes": image[1]
                },
                "labeled_image": {
                    "path": labeled_image[0],
                    "bytes": labeled_image[1]
                },
                'license_plate.id': row[3],
                'license_plate.visibility': row[4],
                'license_plate.rows_count': row[5],
                'license_plate.number': row[6],
                'license_plate.serial': row[7],
                'license_plate.country': row[8],
                'license_plate.mask': row[9]
            }
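

# Usage sketch (not part of the builder itself): a minimal example of loading one
# of the configs declared above from the Hub repository referenced by _HOMEPAGE.
# It assumes network access; recent versions of `datasets` may also require
# passing trust_remote_code=True, since the dataset is backed by this script.
if __name__ == "__main__":
    ds = datasets.load_dataset("TrainingDataPro/license_plates",
                               name="Brazil_youtube",
                               split="train")
    print(ds[0]["license_plate.number"])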