import datasets
import PIL.Image
import PIL.ImageOps

_CITATION = """\
@InProceedings{huggingface:dataset,
  title = {generated-usa-passeports-dataset},
  author = {TrainingDataPro},
  year = {2023}
}
"""

_DESCRIPTION = """\
Data generation in machine learning involves creating or manipulating data
to train and evaluate machine learning models. The purpose of data generation
is to provide diverse and representative examples that cover a wide range of
scenarios, ensuring the model's robustness and generalization.
Data augmentation techniques apply various transformations to existing data
samples to create new ones. These transformations include random rotations,
translations, scaling, flips, and more. Augmentation helps increase the
dataset size, introduce natural variations, and improve model performance by
making the model more invariant to specific transformations.
The dataset contains **GENERATED** USA passports, which are replicas of
official passports but with randomly generated details, such as name, date of
birth, etc. The primary intention of generating these fake passports is to
demonstrate the structure and content of a typical passport document and to
train the neural network to identify this type of document.
Generated passports can assist in conducting research without accessing or
compromising real user data, which is often sensitive and subject to privacy
regulations. Synthetic data generation allows researchers to develop and
refine models using simulated passport data without risking privacy leaks.
"""

_NAME = 'generated-usa-passeports-dataset'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = "cc-by-nc-nd-4.0"

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"

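# Illustrative sketch only, not used by the loader: the kind of augmentations
# described in _DESCRIPTION, assuming PIL. The exact transforms used to build
# this dataset are not documented here, so the parameters below are examples.
def _example_augmentation(img):
    import random

    # Random rotation within +/- 15 degrees, keeping the whole image visible.
    img = img.rotate(random.uniform(-15, 15), expand=True)
    # Random horizontal flip.
    if random.random() < 0.5:
        img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
    # Random rescaling between 90% and 110% of the original size.
    scale = random.uniform(0.9, 1.1)
    img = img.resize((int(img.width * scale), int(img.height * scale)))
    return img
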
def exif_transpose(img):
    """Rotate/flip an image according to its EXIF orientation tag.

    Fallback for Pillow versions without PIL.ImageOps.exif_transpose.
    """
    if not img:
        return img

    # EXIF tag 274 stores the orientation of the image.
    exif_orientation_tag = 274

    if hasattr(img, "_getexif") and isinstance(
            img._getexif(), dict) and exif_orientation_tag in img._getexif():
        exif_data = img._getexif()
        orientation = exif_data[exif_orientation_tag]

        if orientation == 1:
            # Already upright: nothing to do.
            pass
        elif orientation == 2:
            # Flip horizontally.
            img = img.transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 3:
            # Rotate 180 degrees.
            img = img.rotate(180)
        elif orientation == 4:
            # Rotate 180 degrees, then flip horizontally.
            img = img.rotate(180).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 5:
            # Rotate 90 degrees clockwise, then flip horizontally.
            img = img.rotate(-90,
                             expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 6:
            # Rotate 90 degrees clockwise.
            img = img.rotate(-90, expand=True)
        elif orientation == 7:
            # Rotate 90 degrees counter-clockwise, then flip horizontally.
            img = img.rotate(90,
                             expand=True).transpose(PIL.Image.FLIP_LEFT_RIGHT)
        elif orientation == 8:
            # Rotate 90 degrees counter-clockwise.
            img = img.rotate(90, expand=True)

    return img

def load_image_file(file, mode='RGB'):
    """Load an image file as a PIL image in the given mode, honouring EXIF
    orientation."""
    img = PIL.Image.open(file)

    if hasattr(PIL.ImageOps, 'exif_transpose'):
        # Newer Pillow versions provide their own EXIF-aware transpose helper.
        img = PIL.ImageOps.exif_transpose(img)
    else:
        img = exif_transpose(img)

    img = img.convert(mode)

    return img

class GeneratedUsaPasseportsDataset(datasets.GeneratorBasedBuilder):
    """Builder for the generated USA passports dataset."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                'original': datasets.Image(),
                'us_pass_augmentated_1': datasets.Image(),
                'us_pass_augmentated_2': datasets.Image(),
                'us_pass_augmentated_3': datasets.Image()
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
            license=_LICENSE)

    def _split_generators(self, dl_manager):
        # Download the original and augmented images plus the CSV annotations.
        original = dl_manager.download_and_extract(f"{_DATA}original.zip")
        augmentation = dl_manager.download_and_extract(
            f"{_DATA}augmentation.zip")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        original = dl_manager.iter_files(original)
        augmentation = dl_manager.iter_files(augmentation)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "original": original,
                                        'augmentation': augmentation,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, original, augmentation, annotations):
        original = list(original)
        augmentation = list(augmentation)
        # Every original image has three augmented versions, so group the
        # augmented files into consecutive chunks of three.
        augmentation = [
            augmentation[i:i + 3] for i in range(0, len(augmentation), 3)
        ]

        for idx, (org, aug) in enumerate(zip(original, augmentation)):
            yield idx, {
                'original': load_image_file(org),
                'us_pass_augmentated_1': load_image_file(aug[0]),
                'us_pass_augmentated_2': load_image_file(aug[1]),
                'us_pass_augmentated_3': load_image_file(aug[2])
            }
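
# A minimal usage sketch, assuming this file is hosted as the loading script of
# the TrainingDataPro/generated-usa-passeports-dataset repository on the
# Hugging Face Hub (see _HOMEPAGE):
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("TrainingDataPro/generated-usa-passeports-dataset")
#     sample = dataset["train"][0]
#     sample["original"].show()  # the generated passport as a PIL image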