import datasets
import pandas as pd

_CITATION = """\
@InProceedings{huggingface:dataset,
title = {selfie_and_video},
author = {TrainingDataPro},
year = {2023}
}
"""

_DESCRIPTION = """\
The dataset contains 4,000 people. Each person took a selfie on a webcam and
a selfie on a mobile phone. In addition, each person recorded a video from the
webcam and from the phone while pronouncing a given set of numbers.
The dataset includes one folder per person; each folder contains 8 files
(4 images and 4 videos).
"""

_NAME = 'selfie_and_video'

_HOMEPAGE = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}"

_LICENSE = ""

_DATA = f"https://huggingface.co/datasets/TrainingDataPro/{_NAME}/resolve/main/data/"


class SelfieAndVideo(datasets.GeneratorBasedBuilder):
    """Webcam and mobile-phone selfies plus videos recorded by each person."""

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                # Photos are decoded as images; videos are exposed as path strings.
                'photo_1': datasets.Image(),
                'photo_2': datasets.Image(),
                'video_3': datasets.Value('string'),
                'video_4': datasets.Value('string'),
                'photo_5': datasets.Image(),
                'photo_6': datasets.Image(),
                'video_7': datasets.Value('string'),
                'video_8': datasets.Value('string'),
                'set_id': datasets.Value('string'),
                'worker_id': datasets.Value('string'),
                'age': datasets.Value('int8'),
                'country': datasets.Value('string'),
                'gender': datasets.Value('string')
            }),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            citation=_CITATION,
        )
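
    # Shape of one generated example (a sketch based on the features above;
    # the paths and values shown are illustrative, not taken from the data):
    #   {
    #       'photo_1': {'path': '<archive path ending in 1.jpg>', 'bytes': b'...'},
    #       'video_3': '<archive path of file 3>',
    #       ...
    #       'set_id': '...', 'worker_id': '...', 'age': 0,
    #       'country': '...', 'gender': '...'
    #   }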

    def _split_generators(self, dl_manager):
        # Download the media archive and the annotation CSV, then iterate over
        # the archive members without extracting the archive to disk.
        images = dl_manager.download(f"{_DATA}data.tar.gz")
        annotations = dl_manager.download(f"{_DATA}{_NAME}.csv")
        images = dl_manager.iter_archive(images)
        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN,
                                    gen_kwargs={
                                        "images": images,
                                        'annotations': annotations
                                    }),
        ]

    def _generate_examples(self, images, annotations):
        annotations_df = pd.read_csv(annotations, sep=';')

        # Collect the raw bytes of every image in the archive, keyed by its
        # archive path ('Link' matches the column name in the CSV).
        images_data = pd.DataFrame(columns=['Link', 'Bytes'])
        for idx, (image_path, image) in enumerate(images):
            if image_path.lower().endswith('.jpg'):
                images_data.loc[idx] = {
                    'Link': image_path,
                    'Bytes': image.read()
                }

        annotations_df = pd.merge(annotations_df,
                                  images_data,
                                  on=['Link'],
                                  how='left')

        # One example per worker: that worker's eight files plus the metadata
        # shared by all of them.
        for idx, worker_id in enumerate(pd.unique(annotations_df['WorkerId'])):
            annotation = annotations_df.loc[annotations_df['WorkerId'] ==
                                            worker_id]
            annotation = annotation.sort_values(['Link'])
            # Character 37 of the archive path is the file number (1-8) used in
            # the feature name; photos keep their bytes, videos only their path.
            data = {
                (f'photo_{row.Link[37]}' if row.Link.lower().endswith('.jpg')
                 else f'video_{row.Link[37]}'):
                ({
                    'path': row.Link,
                    'bytes': row.Bytes
                } if row.Link.lower().endswith('.jpg') else row.Link)
                for row in annotation.itertuples()
            }

            # Per-worker metadata is taken from the row of the first photo.
            first_photo = annotation.loc[
                annotation['Link'].str.lower().str.endswith('1.jpg')]
            data['worker_id'] = worker_id
            data['age'] = first_photo['Age'].values[0]
            data['country'] = first_photo['Country'].values[0]
            data['gender'] = first_photo['Gender'].values[0]
            data['set_id'] = first_photo['SetId'].values[0]

            yield idx, data
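

# Example usage (a minimal sketch; it assumes the `datasets` library is
# installed and the data files above are reachable on the Hugging Face Hub):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("TrainingDataPro/selfie_and_video", split="train")
#     example = ds[0]
#     print(example["worker_id"], example["age"], example["country"])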