# generated-vietnamese-passeports-dataset / generated-vietnamese-passeports-dataset.py
# Uploaded by vkashko — commit a022267 ("fix: script")
import datasets
import pandas as pd
# BibTeX entry surfaced via `DatasetInfo.citation` on the dataset card.
_CITATION = """\
@InProceedings{huggingface:dataset,
title = {generated-vietnamese-passeports-dataset},
author = {TrainingDataPro},
year = {2023}
}
"""
# Long-form description surfaced via `DatasetInfo.description`.
_DESCRIPTION = """\
Data generation in machine learning involves creating or manipulating data to train
and evaluate machine learning models. The purpose of data generation is to provide
diverse and representative examples that cover a wide range of scenarios, ensuring the
model's robustness and generalization.
The dataset contains GENERATED Vietnamese passports, which are replicas of official
passports but with randomly generated details, such as name, date of birth etc.
The primary intention of generating these fake passports is to demonstrate the
structure and content of a typical passport document and to train the neural network to
identify this type of document.
Generated passports can assist in conducting research without accessing or compromising
real user data that is often sensitive and subject to privacy regulations. Synthetic
data generation allows researchers to *develop and refine models using simulated
passport data without risking privacy leaks*.
"""
# Repository name of the dataset on the Hugging Face Hub.
_NAME = "generated-vietnamese-passeports-dataset"
# Canonical dataset page on the Hub.
_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"
# No license string was provided by the publisher.
_LICENSE = ""
# Base URL under which the image archive and the annotation CSV are hosted.
_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"
class GeneratedVietnamesePasseportsDataset(datasets.GeneratorBasedBuilder):
    """Builder for the synthetic (generated) Vietnamese passports dataset.

    Downloads an image tarball and a CSV annotation file from the Hugging
    Face Hub and yields one example per archived image, pairing it with the
    numeric id recorded for that image in the CSV.
    """

    def _info(self):
        # Each example is a numeric id plus the raw passport image.
        features = datasets.Features(
            {"id": datasets.Value("int32"), "image": datasets.Image()}
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Fetch both remote files; the tarball is streamed, not extracted.
        archive_path = dl_manager.download(f"{_DATA}images.tar.gz")
        csv_path = dl_manager.download(f"{_DATA}{_NAME}.csv")
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "images": dl_manager.iter_archive(archive_path),
                    "annotations": csv_path,
                },
            ),
        ]

    def _generate_examples(self, images, annotations):
        # The CSV's "image" column holds in-archive paths; "image_id" holds
        # the id to emit. Assumes every archive member has a matching CSV
        # row — TODO confirm against the published data files.
        table = pd.read_csv(annotations)
        for key, (member_path, member_file) in enumerate(images):
            matching_ids = table.loc[table["image"] == member_path, "image_id"]
            yield key, {
                "id": matching_ids.values[0],
                "image": {"path": member_path, "bytes": member_file.read()},
            }