import logging
import os
from functools import partial
from multiprocessing.pool import ThreadPool
from typing import Dict, List, Optional, Tuple

import cv2
import numpy as np
from mivolo.data.data_reader import AnnotType, PictureInfo, get_all_files, read_csv_annotation_file
from mivolo.data.misc import IOU, class_letterbox, cropout_black_parts
from timm.data.readers.reader import Reader
from tqdm import tqdm

CROP_ROUND_TOL = 0.3  # tolerance passed to cropout_black_parts() when trimming cut-out regions
MIN_PERSON_SIZE = 100  # minimum person crop side in pixels; smaller crops are dropped
MIN_PERSON_CROP_AFTERCUT_RATIO = 0.4  # minimum area ratio that must survive cutting out associated objects

_logger = logging.getLogger("ReaderAgeGender")


class ReaderAgeGender(Reader):
    """
    Reader for the (almost original) cleaned IMDB-WIKI dataset.
    Two changes relative to the original layout:
        1. Annotations must be in the ./annotation subdir of the dataset root
        2. Images must be in the ./images subdir

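    Example (a minimal usage sketch; the paths below are hypothetical):

        reader = ReaderAgeGender(
            images_path="/data/agegender/images",
            annotations_path="/data/agegender/annotation",
            split="validation",
        )
        (face_crop, person_crop), (age, gender) = reader[0]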
    """

    def __init__(
        self,
        images_path,
        annotations_path,
        split="validation",
        target_size=224,
        min_size=5,
        seed=1234,
        with_persons=False,
        min_person_size=MIN_PERSON_SIZE,
        disable_faces=False,
        only_age=False,
        min_person_aftercut_ratio=MIN_PERSON_CROP_AFTERCUT_RATIO,
        crop_round_tol=CROP_ROUND_TOL,
    ):
        super().__init__()

        self.with_persons = with_persons
        self.disable_faces = disable_faces
        self.only_age = only_age

        # only black is supported for now, even though it interacts poorly with later normalization
        self.crop_out_color = (0, 0, 0)

        self.empty_crop = np.ones((target_size, target_size, 3)) * self.crop_out_color
        self.empty_crop = self.empty_crop.astype(np.uint8)

        self.min_person_size = min_person_size
        self.min_person_aftercut_ratio = min_person_aftercut_ratio
        self.crop_round_tol = crop_round_tol

        self.split = split
        self.min_size = min_size
        self.seed = seed
        self.target_size = target_size

        # Read annotations; there can be multiple files if annotations_path is a directory
        self._ann: Dict[str, List[PictureInfo]] = {}  # list of samples for each image
        self._associated_objects: Dict[str, Dict[int, List[List[int]]]] = {}
        self._faces_list: List[Tuple[str, int]] = []  # samples from this list will be loaded in __getitem__

        self._read_annotations(images_path, annotations_path)
        _logger.info(f"Dataset length: {len(self._faces_list)} crops")

    def __getitem__(self, index):
        return self._read_img_and_label(index)

    def __len__(self):
        return len(self._faces_list)

    def _filename(self, index, basename=False, absolute=False):
        img_p = self._faces_list[index][0]
        return os.path.basename(img_p) if basename else img_p

    def _read_annotations(self, images_path, csvs_path):
        self._ann = {}
        self._faces_list = []
        self._associated_objects = {}

        csvs = get_all_files(csvs_path, [".csv"])
        csvs = [c for c in csvs if self.split in os.path.basename(c)]

        # load annotations per image
        for csv in csvs:
            db, ann_type = read_csv_annotation_file(csv, images_path)
            if self.with_persons and ann_type != AnnotType.PERSONS:
                raise ValueError(
                    f"Annotation type in file {csv} contains no persons, "
                    f"but annotations with persons are requested."
                )
            self._ann.update(db)

        if len(self._ann) == 0:
            raise ValueError("Annotations are empty!")

        self._ann, self._associated_objects = self.prepare_annotations()
        images_list = list(self._ann.keys())

        for img_path in images_list:
            for index, image_sample_info in enumerate(self._ann[img_path]):
                assert image_sample_info.has_gt(
                    self.only_age
                ), "Annotations must be checked with self.prepare_annotations() func"
                self._faces_list.append((img_path, index))

    def _read_img_and_label(self, index):
        if not isinstance(index, int):
            raise TypeError("ReaderAgeGender expected index to be an integer")

        img_p, face_index = self._faces_list[index]
        ann: PictureInfo = self._ann[img_p][face_index]
        img = cv2.imread(img_p)

        face_empty = True
        if ann.has_face_bbox and not (self.with_persons and self.disable_faces):
            face_crop, face_empty = self._get_crop(ann.bbox, img)

        if not self.with_persons and face_empty:
            # face-only mode: a valid face crop is required here
            raise ValueError("Annotations must be checked with self.prepare_annotations() func")

        if face_empty:
            face_crop = self.empty_crop

        person_empty = True
        if self.with_persons or self.disable_faces:
            if ann.has_person_bbox:
                # cut off all associated objects from person crop
                objects = self._associated_objects[img_p][face_index]
                person_crop, person_empty = self._get_crop(
                    ann.person_bbox,
                    img,
                    crop_out_color=self.crop_out_color,
                    asced_objects=objects,
                )

            if face_empty and person_empty:
                raise ValueError("Annotations must be checked with self.prepare_annotations() func")

        if person_empty:
            person_crop = self.empty_crop

        return (face_crop, person_crop), [ann.age, ann.gender]

    def _get_crop(
        self,
        bbox,
        img,
        asced_objects=None,
        crop_out_color=(0, 0, 0),
    ) -> Tuple[np.ndarray, bool]:

        empty_bbox = False

        xmin, ymin, xmax, ymax = bbox
        assert not (
            ymax - ymin < self.min_size or xmax - xmin < self.min_size
        ), "Annotations must be checked with self.prepare_annotations() func"

        crop = img[ymin:ymax, xmin:xmax]

        if asced_objects:
            # cut off other objects for person crop
            crop, empty_bbox = _cropout_asced_objs(
                asced_objects,
                bbox,
                crop.copy(),
                crop_out_color=crop_out_color,
                min_person_size=self.min_person_size,
                crop_round_tol=self.crop_round_tol,
                min_person_aftercut_ratio=self.min_person_aftercut_ratio,
            )
            if empty_bbox:
                crop = self.empty_crop

        crop = class_letterbox(crop, new_shape=(self.target_size, self.target_size), color=crop_out_color)
        return crop, empty_bbox

    def prepare_annotations(self):

        good_anns: Dict[str, List[PictureInfo]] = {}
        all_associated_objects: Dict[str, Dict[int, List[List[int]]]] = {}

        if not self.with_persons:
            # remove all persons
            for img_path, bboxes in self._ann.items():
                for sample in bboxes:
                    sample.clear_person_bbox()

        # check dataset and collect associated_objects
        verify_images_func = partial(
            verify_images,
            min_size=self.min_size,
            min_person_size=self.min_person_size,
            with_persons=self.with_persons,
            disable_faces=self.disable_faces,
            crop_round_tol=self.crop_round_tol,
            min_person_aftercut_ratio=self.min_person_aftercut_ratio,
            only_age=self.only_age,
        )
        num_threads = min(8, os.cpu_count() or 1)

        all_msgs = []
        broken = 0
        skipped = 0
        all_skipped_crops = 0
        desc = "Check annotations..."
        with ThreadPool(num_threads) as pool:
            pbar = tqdm(
                pool.imap_unordered(verify_images_func, list(self._ann.items())),
                desc=desc,
                total=len(self._ann),
            )

            for (img_info, associated_objects, msgs, is_corrupted, is_empty_annotations, skipped_crops) in pbar:
                broken += 1 if is_corrupted else 0
                all_msgs.extend(msgs)
                all_skipped_crops += skipped_crops
                skipped += 1 if is_empty_annotations else 0
                if img_info is not None:
                    img_path, img_samples = img_info
                    good_anns[img_path] = img_samples
                    all_associated_objects.update({img_path: associated_objects})

                pbar.desc = (
                    f"{desc} {skipped} images skipped ({all_skipped_crops} crops are incorrect); "
                    f"{broken} images corrupted"
                )

            pbar.close()

        for msg in all_msgs:
            print(msg)
        print(f"\nLeft images: {len(good_anns)}")

        return good_anns, all_associated_objects


def verify_images(
    img_info,
    min_size: int,
    min_person_size: int,
    with_persons: bool,
    disable_faces: bool,
    crop_round_tol: float,
    min_person_aftercut_ratio: float,
    only_age: bool,
):
    # Filter out a sample if its crop is too small, or if the image cannot be
    # read or does not exist.

    disable_faces = disable_faces and with_persons
    kwargs = dict(
        min_person_size=min_person_size,
        disable_faces=disable_faces,
        with_persons=with_persons,
        crop_round_tol=crop_round_tol,
        min_person_aftercut_ratio=min_person_aftercut_ratio,
        only_age=only_age,
    )

    def bbox_correct(bbox, min_size, im_h, im_w) -> Tuple[bool, List[int]]:
        ymin, ymax, xmin, xmax = _correct_bbox(bbox, im_h, im_w)
        crop_h, crop_w = ymax - ymin, xmax - xmin
        if crop_h < min_size or crop_w < min_size:
            return False, [-1, -1, -1, -1]
        bbox = [xmin, ymin, xmax, ymax]
        return True, bbox

    msgs = []
    skipped_crops = 0
    is_corrupted = False
    is_empty_annotations = False

    img_path: str = img_info[0]
    img_samples: List[PictureInfo] = img_info[1]
    try:
        im_cv = cv2.imread(img_path)
        im_h, im_w = im_cv.shape[:2]
    except Exception:
        msgs.append(f"Can not load image {img_path}")
        is_corrupted = True
        return None, {}, msgs, is_corrupted, is_empty_annotations, skipped_crops

    out_samples: List[PictureInfo] = []
    for sample in img_samples:
        # correct face bbox
        if sample.has_face_bbox:
            is_correct, sample.bbox = bbox_correct(sample.bbox, min_size, im_h, im_w)
            if not is_correct and sample.has_gt(only_age):
                msgs.append("Small face. Passing..")
                skipped_crops += 1

        # correct person bbox
        if sample.has_person_bbox:
            is_correct, sample.person_bbox = bbox_correct(
                sample.person_bbox, max(min_person_size, min_size), im_h, im_w
            )
            if not is_correct and sample.has_gt(only_age):
                msgs.append(f"Small person {img_path}. Passing..")
                skipped_crops += 1

        if sample.has_face_bbox or sample.has_person_bbox:
            out_samples.append(sample)
        elif sample.has_gt(only_age):
            msgs.append("Sample hs no face and no body. Passing..")
            skipped_crops += 1

    # sort so that samples with undefined age and gender come last
    out_samples = sorted(out_samples, key=lambda sample: 1 if not sample.has_gt(only_age) else 0)

    # for each person, find other face and person bboxes that intersect it
    associated_objects: Dict[int, List[List[int]]] = find_associated_objects(out_samples, only_age=only_age)

    out_samples, associated_objects, skipped_crops = filter_bad_samples(
        out_samples, associated_objects, im_cv, msgs, skipped_crops, **kwargs
    )

    out_img_info: Optional[Tuple[str, List]] = (img_path, out_samples)
    if len(out_samples) == 0:
        out_img_info = None
        is_empty_annotations = True

    return out_img_info, associated_objects, msgs, is_corrupted, is_empty_annotations, skipped_crops


def filter_bad_samples(
    out_samples: List[PictureInfo],
    associated_objects: dict,
    im_cv: np.ndarray,
    msgs: List[str],
    skipped_crops: int,
    **kwargs,
):
    with_persons, disable_faces, min_person_size, crop_round_tol, min_person_aftercut_ratio, only_age = (
        kwargs["with_persons"],
        kwargs["disable_faces"],
        kwargs["min_person_size"],
        kwargs["crop_round_tol"],
        kwargs["min_person_aftercut_ratio"],
        kwargs["only_age"],
    )

    # keep only samples with annotations
    inds = [sample_ind for sample_ind, sample in enumerate(out_samples) if sample.has_gt(only_age)]
    out_samples, associated_objects = _filter_by_ind(out_samples, associated_objects, inds)

    if kwargs["disable_faces"]:
        # clear all faces
        for ind, sample in enumerate(out_samples):
            sample.clear_face_bbox()

        # left only samples with person_bbox
        inds = [sample_ind for sample_ind, sample in enumerate(out_samples) if sample.has_person_bbox]
        out_samples, associated_objects = _filter_by_ind(out_samples, associated_objects, inds)

    if with_persons or disable_faces:
        # check that the preprocessing func _cropout_asced_objs()
        # returns a non-empty person image for each output sample

        inds = []
        for ind, sample in enumerate(out_samples):
            person_empty = True
            if sample.has_person_bbox:
                xmin, ymin, xmax, ymax = sample.person_bbox
                crop = im_cv[ymin:ymax, xmin:xmax]
                # cut off all associated objects from person crop
                _, person_empty = _cropout_asced_objs(
                    associated_objects[ind],
                    sample.person_bbox,
                    crop.copy(),
                    min_person_size=min_person_size,
                    crop_round_tol=crop_round_tol,
                    min_person_aftercut_ratio=min_person_aftercut_ratio,
                )

            if person_empty and not sample.has_face_bbox:
                msgs.append("Small person after preprocessing. Passing..")
                skipped_crops += 1
            else:
                inds.append(ind)
        out_samples, associated_objects = _filter_by_ind(out_samples, associated_objects, inds)

    assert len(associated_objects) == len(out_samples)
    return out_samples, associated_objects, skipped_crops


def _filter_by_ind(out_samples, associated_objects, inds):
    _associated_objects = {}
    _out_samples = []
    for ind, sample in enumerate(out_samples):
        if ind in inds:
            _associated_objects[len(_out_samples)] = associated_objects[ind]
            _out_samples.append(sample)

    return _out_samples, _associated_objects


def find_associated_objects(
    image_samples: List[PictureInfo], iou_thresh=0.0001, only_age=False
) -> Dict[int, List[List[int]]]:
    """
    For each person (that has gt age and gt gender) find other face and person bboxes that intersect it
    """
    associated_objects: Dict[int, List[List[int]]] = {}

    for iindex, image_sample_info in enumerate(image_samples):
        # add own face
        associated_objects[iindex] = [image_sample_info.bbox] if image_sample_info.has_face_bbox else []

        if not image_sample_info.has_person_bbox or not image_sample_info.has_gt(only_age):
            # samples without gt will not be used
            continue

        iperson_box = image_sample_info.person_bbox
        for jindex, other_image_sample in enumerate(image_samples):
            if iindex == jindex:
                continue
            if other_image_sample.has_face_bbox:
                jface_bbox = other_image_sample.bbox
                iou = _get_iou(jface_bbox, iperson_box)
                if iou >= iou_thresh:
                    associated_objects[iindex].append(jface_bbox)
            if other_image_sample.has_person_bbox:
                jperson_bbox = other_image_sample.person_bbox
                iou = _get_iou(jperson_bbox, iperson_box)
                if iou >= iou_thresh:
                    associated_objects[iindex].append(jperson_bbox)

    return associated_objects


def _cropout_asced_objs(
    asced_objects,
    person_bbox,
    crop,
    min_person_size,
    crop_round_tol,
    min_person_aftercut_ratio,
    crop_out_color=(0, 0, 0),
):
    empty = False
    xmin, ymin, xmax, ymax = person_bbox

    for a_obj in asced_objects:
        aobj_xmin, aobj_ymin, aobj_xmax, aobj_ymax = a_obj

        aobj_ymin = int(max(aobj_ymin - ymin, 0))
        aobj_xmin = int(max(aobj_xmin - xmin, 0))
        aobj_ymax = int(min(aobj_ymax - ymin, ymax - ymin))
        aobj_xmax = int(min(aobj_xmax - xmin, xmax - xmin))

        crop[aobj_ymin:aobj_ymax, aobj_xmin:aobj_xmax] = crop_out_color

    crop, cropped_ratio = cropout_black_parts(crop, crop_round_tol)
    if (
        crop.shape[0] < min_person_size or crop.shape[1] < min_person_size
    ) or cropped_ratio < min_person_aftercut_ratio:
        crop = None
        empty = True

    return crop, empty


def _correct_bbox(bbox, h, w):
    # clamp the bbox to image bounds; note the returned order is (ymin, ymax, xmin, xmax)
    xmin, ymin, xmax, ymax = bbox
    ymin = min(max(ymin, 0), h)
    ymax = min(max(ymax, 0), h)
    xmin = min(max(xmin, 0), w)
    xmax = min(max(xmax, 0), w)
    return ymin, ymax, xmin, xmax


def _get_iou(bbox1, bbox2):
    xmin1, ymin1, xmax1, ymax1 = bbox1
    xmin2, ymin2, xmax2, ymax2 = bbox2
    iou = IOU(
        [ymin1, xmin1, ymax1, xmax1],
        [ymin2, xmin2, ymax2, xmax2],
    )
    return iou
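

# Minimal usage sketch (assumptions: a dataset laid out as described in the
# ReaderAgeGender docstring; the paths and split below are hypothetical).
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    reader = ReaderAgeGender(
        images_path="/data/agegender/images",
        annotations_path="/data/agegender/annotation",
        split="validation",
    )
    print(f"Loaded {len(reader)} crops")
    (face_crop, person_crop), (age, gender) = reader[0]
    print(face_crop.shape, person_crop.shape, age, gender)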