khaclinh commited on
Commit
4bf0340
1 Parent(s): e88472e

Create new file

Browse files
Files changed (1) hide show
  1. testdata.py +149 -0
testdata.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # coding=utf-8
2
+ # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """PP4AV dataset."""
16
+
17
+ import os
18
+ import glob as glob
19
+ from tqdm import tqdm
20
+ from pathlib import Path
21
+ import datasets
22
+
23
+
24
+ _HOMEPAGE = "http://shuoyang1213.me/WIDERFACE/"
25
+
26
+ _LICENSE = "Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International (CC BY-NC-ND 4.0)"
27
+
28
+ _CITATION = """\
29
+ @inproceedings{yang2016wider,
30
+ Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
31
+ Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
32
+ Title = {WIDER FACE: A Face Detection Benchmark},
33
+ Year = {2016}}
34
+ """
35
+
36
+ _DESCRIPTION = """\
37
+ WIDER FACE dataset is a face detection benchmark dataset, of which images are
38
+ selected from the publicly available WIDER dataset. We choose 32,203 images and
39
+ label 393,703 faces with a high degree of variability in scale, pose and
40
+ occlusion as depicted in the sample images. WIDER FACE dataset is organized
41
+ based on 61 event classes. For each event class, we randomly select 40%/10%/50%
42
+ data as training, validation and testing sets. We adopt the same evaluation
43
+ metric employed in the PASCAL VOC dataset. Similar to MALF and Caltech datasets,
44
+ we do not release bounding box ground truth for the test images. Users are
45
+ required to submit final prediction files, which we shall proceed to evaluate.
46
+ """
47
+
48
+
49
+ _REPO = "https://huggingface.co/datasets/testdata/resolve/main/data"
50
+ _URLS = {
51
+ "fisheye": f"{_REPO}/fisheye.zip",
52
+ "annot": f"{_REPO}/annotations.zip",
53
+ }
54
+
55
+ IMG_EXT = ['png', 'jpeg', 'jpg']
56
+
57
+
58
class TestData(datasets.GeneratorBasedBuilder):
    """Loader for a PP4AV-style test set: fisheye-camera images annotated
    with face and license-plate bounding boxes.

    Fixes vs. the original script:
      * added the mandatory ``_split_generators`` (it was missing, so the
        builder could never be executed);
      * rewrote ``_generate_examples``, which referenced undefined names
        (``label_dir``, ``src_img_relative_path``, ``idx``, ``faces``,
        ``plates``, ``split``, ``annot_fname``), yielded before computing
        anything, and carried dead WIDER FACE parsing code after the yield.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the dataset schema: one image plus two sequences of
        4-float bounding boxes ("faces" and "plates")."""
        bbox_feature = {"bbox": datasets.Sequence(datasets.Value("float32"), length=4)}
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image": datasets.Image(),
                    "faces": datasets.Sequence(bbox_feature),
                    "plates": datasets.Sequence(bbox_feature),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the image and annotation archives and expose
        them as a single TEST split (no train/val archives are published)."""
        data_dir = dl_manager.download_and_extract(_URLS["fisheye"])
        annot_dir = dl_manager.download_and_extract(_URLS["annot"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": data_dir, "annot_dir": annot_dir},
            )
        ]

    def _generate_examples(self, data_dir, annot_dir):
        """Yield ``(index, example)`` pairs.

        Args:
            data_dir: extraction root of ``fisheye.zip`` (images live under
                a ``fisheye/`` subdirectory).
            annot_dir: extraction root of ``annotations.zip``; each image is
                expected to have a sibling ``.txt`` label file under
                ``annotations/`` mirroring the image's relative path.
        """
        image_dir = os.path.join(data_dir, "fisheye")
        # The original resolved annotations against data_dir even though the
        # annotation archive is downloaded separately; use annot_dir instead.
        label_dir = os.path.join(annot_dir, "annotations")

        image_paths = []
        for ext in IMG_EXT:
            image_paths.extend(Path(image_dir).glob(f'**/*.{ext}'))
        image_paths.sort()  # deterministic example ordering across runs

        for idx, image_path in enumerate(tqdm(image_paths)):
            rel_path = image_path.relative_to(image_dir)
            gt_path = (Path(label_dir) / rel_path).with_suffix('.txt')

            faces, plates = [], []
            # Images with no label file yield empty face/plate lists rather
            # than crashing — best-effort, matching the declared schema.
            if gt_path.exists():
                with open(gt_path, "r", encoding="utf-8") as f:
                    for line in f:
                        parts = line.split()
                        # assumes one object per line, "<class> x y w h",
                        # with class 0 = face and any other class = plate —
                        # TODO confirm against the published PP4AV
                        # annotation format.
                        if len(parts) != 5:
                            continue
                        label = int(parts[0])
                        bbox = [float(v) for v in parts[1:]]
                        if label == 0:
                            faces.append({"bbox": bbox})
                        else:
                            plates.append({"bbox": bbox})

            yield idx, {"image": str(image_path), "faces": faces, "plates": plates}