"""CelebA FACE dataset."""

import os

import datasets

_HOMEPAGE = "https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html"

_LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)"

_CITATION = """\
@inproceedings{liu2015faceattributes,
  title = {Deep Learning Face Attributes in the Wild},
  author = {Liu, Ziwei and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou},
  booktitle = {Proceedings of International Conference on Computer Vision (ICCV)},
  month = {December},
  year = {2015}
}
"""

_DESCRIPTION = """\
CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset with more than 200K celebrity images,
each with 40 attribute annotations. The images in this dataset cover large pose variations and background clutter.
CelebA has large diversities, large quantities, and rich annotations, including 10,177 identities, 202,599 face images,
5 landmark locations, and 40 binary attribute annotations per image.
"""

_REPO = "https://huggingface.co/datasets/hfaus/CelebA_bbox_and_facepoints/resolve/main/data"
_URLS = {
    "train": f"{_REPO}/celebA_train.zip",
    "validation": f"{_REPO}/celebA_val.zip",
    "test": f"{_REPO}/celebA_test.zip",
}
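
# Note: the parsing in `_generate_examples` below assumes the official CelebA
# annotation layout for `list_bbox_celeba.txt` and `list_landmarks_celeba.txt`
# (an image count line, a column-header line, then one whitespace-separated row
# per image), for example (illustrative row):
#
#   202599
#   image_id x_1 y_1 width height
#   000001.jpg    95  71 226 313
#
# If the archives in this repository ship a different layout, the header-skipping
# logic below would need to be adjusted.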


class CelebA(datasets.GeneratorBasedBuilder):
    """CelebA dataset."""

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    # Each CelebA image is annotated with a single face:
                    # a bounding box (x, y, width, height) and five (x, y) landmarks.
                    "bbox": datasets.Sequence(datasets.Value("int32"), length=4),
                    "facial_landmarks": {
                        "lefteye": datasets.Sequence(datasets.Value("int32"), length=2),
                        "righteye": datasets.Sequence(datasets.Value("int32"), length=2),
                        "nose": datasets.Sequence(datasets.Value("int32"), length=2),
                        "leftmouth": datasets.Sequence(datasets.Value("int32"), length=2),
                        "rightmouth": datasets.Sequence(datasets.Value("int32"), length=2),
                    },
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        data_dir = dl_manager.download_and_extract(_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "split": "train",
                    "data_dir": data_dir["train"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "split": "test",
                    "data_dir": data_dir["test"],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "split": "val",
                    "data_dir": data_dir["validation"],
                },
            ),
        ]

    def _generate_examples(self, split, data_dir):
        # The extracted split archive is expected to contain the images together
        # with the two annotation files; both files list images in the same order.
        image_dir = data_dir
        bbox_path = os.path.join(data_dir, "list_bbox_celeba.txt")
        landmarks_path = os.path.join(data_dir, "list_landmarks_celeba.txt")

        with open(bbox_path, "r", encoding="utf-8") as bbox_file, open(
            landmarks_path, "r", encoding="utf-8"
        ) as landmarks_file:
            for idx, (bbox_line, landmarks_line) in enumerate(zip(bbox_file, landmarks_file)):
                bbox_line = bbox_line.rstrip()
                landmarks_line = landmarks_line.rstrip()
                # Skip the header lines (image count and column names).
                if ".jpg" not in bbox_line:
                    continue

                bbox = bbox_line.split()
                landmarks = landmarks_line.split()
                image_file_path = os.path.join(image_dir, bbox[0])

                # Bounding box is stored as x, y, width, height.
                bbox_total = [int(bbox[1]), int(bbox[2]), int(bbox[3]), int(bbox[4])]
                # Five landmark points, each an (x, y) pair.
                facial_landmarks = {
                    "lefteye": (int(landmarks[1]), int(landmarks[2])),
                    "righteye": (int(landmarks[3]), int(landmarks[4])),
                    "nose": (int(landmarks[5]), int(landmarks[6])),
                    "leftmouth": (int(landmarks[7]), int(landmarks[8])),
                    "rightmouth": (int(landmarks[9]), int(landmarks[10])),
                }

                yield idx, {
                    "image": image_file_path,
                    "facial_landmarks": facial_landmarks,
                    "bbox": bbox_total,
                }
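
# Minimal usage sketch (assuming this script is the loading script of the
# `hfaus/CelebA_bbox_and_facepoints` Hub repository referenced above):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("hfaus/CelebA_bbox_and_facepoints", split="validation")
#     example = ds[0]
#     print(example["bbox"], example["facial_landmarks"]["nose"])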