hfaus committed on
Commit
66ede7e
1 Parent(s): 2769b78

Upload CelebA_bbox_and_facepoints.py

Files changed (1)
  1. CelebA_bbox_and_facepoints.py +154 -0
CelebA_bbox_and_facepoints.py ADDED
@@ -0,0 +1,154 @@
+ # coding=utf-8
+ """CelebA FACE dataset."""
+ 
+ import os
+ import datasets
+ 
+ _HOMEPAGE = "https://mmlab.ie.cuhk.edu.hk/projects/CelebA.html"
+ 
+ _LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)"
+ 
+ _CITATION = """\
+ @inproceedings{liu2015faceattributes,
+   title = {Deep Learning Face Attributes in the Wild},
+   author = {Liu, Ziwei and Luo, Ping and Wang, Xiaogang and Tang, Xiaoou},
+   booktitle = {Proceedings of International Conference on Computer Vision (ICCV)},
+   month = {December},
+   year = {2015}
+ }
+ """
+ 
+ _DESCRIPTION = """\
+ CelebFaces Attributes Dataset (CelebA) is a large-scale face attributes dataset with more than 200K celebrity images,
+ each with 40 attribute annotations. The images in this dataset cover large pose variations and background clutter.
+ CelebA has large diversities, large quantities, and rich annotations, including 10,177 identities, 202,599 face images,
+ 5 landmark locations per image, and 40 binary attribute annotations per image.
+ """
+ 
+ _REPO = "https://huggingface.co/datasets/hfaus/CelebA_bbox_and_facepoints/resolve/main/data"
+ _URLS = {
+     "train": f"{_REPO}/celebA_train.zip",
+     "validation": f"{_REPO}/celebA_val.zip",
+     "test": f"{_REPO}/celebA_test.zip"
+ }
+ 
+ class CelebA(datasets.GeneratorBasedBuilder):
+     """CelebA dataset."""
+ 
+     VERSION = datasets.Version("1.0.0")
+ 
+     def _info(self):
+         return datasets.DatasetInfo(
+             description=_DESCRIPTION,
+             features=datasets.Features(
+                 {
+                     "image": datasets.Image(),
+                     "faces": datasets.Sequence(
+                         {
+                             "bbox": datasets.Sequence(datasets.Value("float32"), length=4),
+                             "points": datasets.Sequence(datasets.Value("int32"), length=10)
+                         }
+                     ),
+                 }
+             ),
+             supervised_keys=None,
+             homepage=_HOMEPAGE,
+             license=_LICENSE,
+             citation=_CITATION,
+         )
+ 
+     def _split_generators(self, dl_manager):
+         data_dir = dl_manager.download_and_extract(_URLS)
+         return [
+             datasets.SplitGenerator(
+                 name=datasets.Split.TRAIN,
+                 gen_kwargs={
+                     "split": "train",
+                     "data_dir": data_dir["train"]
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.TEST,
+                 gen_kwargs={
+                     "split": "test",
+                     "data_dir": data_dir["test"]
+                 },
+             ),
+             datasets.SplitGenerator(
+                 name=datasets.Split.VALIDATION,
+                 gen_kwargs={
+                     "split": "val",
+                     "data_dir": data_dir["validation"]
+                 },
+             ),
+         ]
+ 
+     def _generate_examples(self, split, data_dir):
+         image_dir = data_dir
+         # The annotation files are expected alongside the extracted images.
+         bbox_path = os.path.join(data_dir, "list_bbox_celeba.txt")
+         landmarks_path = os.path.join(data_dir, "list_landmarks_celeba.txt")
+ 
+         # Open both annotation files and walk them in parallel: after their two
+         # header lines they describe the same images in the same order.
+         with open(bbox_path, "r", encoding="utf-8") as bbox_file, \
+              open(landmarks_path, "r", encoding="utf-8") as landmarks_file:
+             idx = 0
+             for bbox_line, landmarks_line in zip(bbox_file, landmarks_file):
+                 bbox_line = bbox_line.rstrip()
+                 landmarks_line = landmarks_line.rstrip()
+                 # Skip the header lines (image count and column names).
+                 if ".jpg" not in bbox_line:
+                     continue
+ 
+                 bbox = bbox_line.split()
+                 landmarks = landmarks_line.split()
+                 image_file_path = os.path.join(image_dir, bbox[0])
+ 
+                 # Bounding box given as [x_1, y_1, width, height].
+                 bbox_total = [float(v) for v in bbox[1:5]]
+                 # Landmark points in the order: lefteye_x, lefteye_y, righteye_x, righteye_y,
+                 # nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y.
+                 points = [int(v) for v in landmarks[1:11]]
+ 
+                 # CelebA annotates exactly one face per image, so "faces" holds a single
+                 # entry matching the schema declared in _info.
+                 yield idx, {
+                     "image": image_file_path,
+                     "faces": [{"bbox": bbox_total, "points": points}],
+                 }
+                 idx += 1
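
For reference, a minimal usage sketch of how the resulting dataset could be loaded and inspected, assuming the hfaus/CelebA_bbox_and_facepoints repository id and the feature schema declared in _info above; the exact loading flags depend on the installed datasets version.

# Minimal usage sketch; assumes the repo id and schema defined by the script above.
# Recent `datasets` releases require trust_remote_code=True to run a loading script.
from datasets import load_dataset

ds = load_dataset("hfaus/CelebA_bbox_and_facepoints", split="validation", trust_remote_code=True)

example = ds[0]
img = example["image"]               # decoded PIL image
# A Sequence of a dict is exposed as a dict of lists, one entry per annotated face.
boxes = example["faces"]["bbox"]     # [[x_1, y_1, width, height]]
points = example["faces"]["points"]  # [[lefteye_x, lefteye_y, ..., rightmouth_y]]
print(img.size, boxes[0], points[0])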