felixrosberg committed on
Commit
02ed8ba
•
1 Parent(s): ea104c0

moved retina face ops to model repo

retinaface/__pycache__/anchor.cpython-37.pyc DELETED
Binary file (10.3 kB)
 
retinaface/__pycache__/anchor.cpython-38.pyc DELETED
Binary file (10.4 kB)
 
retinaface/__pycache__/models.cpython-37.pyc DELETED
Binary file (10.7 kB)
 
retinaface/__pycache__/models.cpython-38.pyc DELETED
Binary file (10.4 kB)
 
retinaface/__pycache__/ops.cpython-37.pyc DELETED
Binary file (1.02 kB)
 
retinaface/anchor.py DELETED
@@ -1,296 +0,0 @@
-"""Anchor utils modified from https://github.com/biubug6/Pytorch_Retinaface"""
-import math
-import tensorflow as tf
-import numpy as np
-from itertools import product as product
-
-
-###############################################################################
-#   Tensorflow / Numpy Priors                                                 #
-###############################################################################
-def prior_box(image_sizes, min_sizes, steps, clip=False):
-    """prior box"""
-    feature_maps = [
-        [math.ceil(image_sizes[0] / step), math.ceil(image_sizes[1] / step)]
-        for step in steps]
-
-    anchors = []
-    for k, f in enumerate(feature_maps):
-        for i, j in product(range(f[0]), range(f[1])):
-            for min_size in min_sizes[k]:
-                s_kx = min_size / image_sizes[1]
-                s_ky = min_size / image_sizes[0]
-                cx = (j + 0.5) * steps[k] / image_sizes[1]
-                cy = (i + 0.5) * steps[k] / image_sizes[0]
-                anchors += [cx, cy, s_kx, s_ky]
-
-    output = np.asarray(anchors).reshape([-1, 4])
-
-    if clip:
-        output = np.clip(output, 0, 1)
-
-    return output
-
-
-def prior_box_tf(image_sizes, min_sizes, steps, clip=False):
-    """prior box"""
-    image_sizes = tf.cast(tf.convert_to_tensor(image_sizes), tf.float32)
-    feature_maps = tf.math.ceil(
-        tf.reshape(image_sizes, [1, 2]) /
-        tf.reshape(tf.cast(steps, tf.float32), [-1, 1]))
-
-    anchors = []
-    for k in range(len(min_sizes)):
-        grid_x, grid_y = _meshgrid_tf(tf.range(feature_maps[k][1]),
-                                      tf.range(feature_maps[k][0]))
-        cx = (grid_x + 0.5) * steps[k] / image_sizes[1]
-        cy = (grid_y + 0.5) * steps[k] / image_sizes[0]
-        cxcy = tf.stack([cx, cy], axis=-1)
-        cxcy = tf.reshape(cxcy, [-1, 2])
-        cxcy = tf.repeat(cxcy, repeats=tf.shape(min_sizes[k])[0], axis=0)
-
-        sx = min_sizes[k] / image_sizes[1]
-        sy = min_sizes[k] / image_sizes[0]
-        sxsy = tf.stack([sx, sy], 1)
-        sxsy = tf.repeat(sxsy[tf.newaxis],
-                         repeats=tf.shape(grid_x)[0] * tf.shape(grid_x)[1],
-                         axis=0)
-        sxsy = tf.reshape(sxsy, [-1, 2])
-
-        anchors.append(tf.concat([cxcy, sxsy], 1))
-
-    output = tf.concat(anchors, axis=0)
-
-    if clip:
-        output = tf.clip_by_value(output, 0, 1)
-
-    return output
-
-
-def _meshgrid_tf(x, y):
-    """workaround solution of the tf.meshgrid() issue:
-    https://github.com/tensorflow/tensorflow/issues/34470"""
-    grid_shape = [tf.shape(y)[0], tf.shape(x)[0]]
-    grid_x = tf.broadcast_to(tf.reshape(x, [1, -1]), grid_shape)
-    grid_y = tf.broadcast_to(tf.reshape(y, [-1, 1]), grid_shape)
-    return grid_x, grid_y
-
-
-###############################################################################
-#   Tensorflow Encoding                                                       #
-###############################################################################
-def encode_tf(labels, priors, match_thresh, ignore_thresh,
-              variances=[0.1, 0.2]):
-    """tensorflow encoding"""
-    assert ignore_thresh <= match_thresh
-    priors = tf.cast(priors, tf.float32)
-    bbox = labels[:, :4]
-    landm = labels[:, 4:-1]
-    landm_valid = labels[:, -1]  # 1: with landm, 0: w/o landm.
-
-    # jaccard index
-    overlaps = _jaccard(bbox, _point_form(priors))
-
-    # (Bipartite Matching)
-    # [num_objects] best prior for each ground truth
-    best_prior_overlap, best_prior_idx = tf.math.top_k(overlaps, k=1)
-    best_prior_overlap = best_prior_overlap[:, 0]
-    best_prior_idx = best_prior_idx[:, 0]
-
-    # [num_priors] best ground truth for each prior
-    overlaps_t = tf.transpose(overlaps)
-    best_truth_overlap, best_truth_idx = tf.math.top_k(overlaps_t, k=1)
-    best_truth_overlap = best_truth_overlap[:, 0]
-    best_truth_idx = best_truth_idx[:, 0]
-
-    # ensure best prior
-    def _loop_body(i, bt_idx, bt_overlap):
-        bp_mask = tf.one_hot(best_prior_idx[i], tf.shape(bt_idx)[0])
-        bp_mask_int = tf.cast(bp_mask, tf.int32)
-        new_bt_idx = bt_idx * (1 - bp_mask_int) + bp_mask_int * i
-        bp_mask_float = tf.cast(bp_mask, tf.float32)
-        new_bt_overlap = bt_overlap * (1 - bp_mask_float) + bp_mask_float * 2
-        return tf.cond(best_prior_overlap[i] > match_thresh,
-                       lambda: (i + 1, new_bt_idx, new_bt_overlap),
-                       lambda: (i + 1, bt_idx, bt_overlap))
-    _, best_truth_idx, best_truth_overlap = tf.while_loop(
-        lambda i, bt_idx, bt_overlap: tf.less(i, tf.shape(best_prior_idx)[0]),
-        _loop_body, [tf.constant(0), best_truth_idx, best_truth_overlap])
-
-    matches_bbox = tf.gather(bbox, best_truth_idx)  # [num_priors, 4]
-    matches_landm = tf.gather(landm, best_truth_idx)  # [num_priors, 10]
-    matches_landm_v = tf.gather(landm_valid, best_truth_idx)  # [num_priors]
-
-    loc_t = _encode_bbox(matches_bbox, priors, variances)
-    landm_t = _encode_landm(matches_landm, priors, variances)
-    landm_valid_t = tf.cast(matches_landm_v > 0, tf.float32)
-    conf_t = tf.cast(best_truth_overlap > match_thresh, tf.float32)
-    conf_t = tf.where(
-        tf.logical_and(best_truth_overlap < match_thresh,
-                       best_truth_overlap > ignore_thresh),
-        tf.ones_like(conf_t) * -1, conf_t)  # 1: pos, 0: neg, -1: ignore
-
-    return tf.concat([loc_t, landm_t, landm_valid_t[..., tf.newaxis],
-                      conf_t[..., tf.newaxis]], axis=1)
-
-
-def _encode_bbox(matched, priors, variances):
-    """Encode the variances from the priorbox layers into the ground truth
-    boxes we have matched (based on jaccard overlap) with the prior boxes.
-    Args:
-        matched: (tensor) Coords of ground truth for each prior in point-form
-            Shape: [num_priors, 4].
-        priors: (tensor) Prior boxes in center-offset form
-            Shape: [num_priors,4].
-        variances: (list[float]) Variances of priorboxes
-    Return:
-        encoded boxes (tensor), Shape: [num_priors, 4]
-    """
-
-    # dist b/t match center and prior's center
-    g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
-    # encode variance
-    g_cxcy /= (variances[0] * priors[:, 2:])
-    # match wh / prior wh
-    g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
-    g_wh = tf.math.log(g_wh) / variances[1]
-    # return target for smooth_l1_loss
-    return tf.concat([g_cxcy, g_wh], 1)  # [num_priors,4]
-
-
-def _encode_landm(matched, priors, variances):
-    """Encode the variances from the priorbox layers into the ground truth
-    boxes we have matched (based on jaccard overlap) with the prior boxes.
-    Args:
-        matched: (tensor) Coords of ground truth for each prior in point-form
-            Shape: [num_priors, 10].
-        priors: (tensor) Prior boxes in center-offset form
-            Shape: [num_priors,4].
-        variances: (list[float]) Variances of priorboxes
-    Return:
-        encoded landm (tensor), Shape: [num_priors, 10]
-    """
-
-    # dist b/t match center and prior's center
-    matched = tf.reshape(matched, [tf.shape(matched)[0], 5, 2])
-    priors = tf.broadcast_to(
-        tf.expand_dims(priors, 1), [tf.shape(matched)[0], 5, 4])
-    g_cxcy = matched[:, :, :2] - priors[:, :, :2]
-    # encode variance
-    g_cxcy /= (variances[0] * priors[:, :, 2:])
-    # g_cxcy /= priors[:, :, 2:]
-    g_cxcy = tf.reshape(g_cxcy, [tf.shape(g_cxcy)[0], -1])
-    # return target for smooth_l1_loss
-    return g_cxcy
-
-
-def _point_form(boxes):
-    """Convert prior_boxes to (xmin, ymin, xmax, ymax)
-    representation for comparison to point form ground truth data.
-    Args:
-        boxes: (tensor) center-size default boxes from priorbox layers.
-    Return:
-        boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
-    """
-    return tf.concat((boxes[:, :2] - boxes[:, 2:] / 2,
-                      boxes[:, :2] + boxes[:, 2:] / 2), axis=1)
-
-
-def _intersect(box_a, box_b):
-    """We resize both tensors to [A,B,2]:
-        [A,2] -> [A,1,2] -> [A,B,2]
-        [B,2] -> [1,B,2] -> [A,B,2]
-    Then we compute the area of intersect between box_a and box_b.
-    Args:
-        box_a: (tensor) bounding boxes, Shape: [A,4].
-        box_b: (tensor) bounding boxes, Shape: [B,4].
-    Return:
-        (tensor) intersection area, Shape: [A,B].
-    """
-    A = tf.shape(box_a)[0]
-    B = tf.shape(box_b)[0]
-    max_xy = tf.minimum(
-        tf.broadcast_to(tf.expand_dims(box_a[:, 2:], 1), [A, B, 2]),
-        tf.broadcast_to(tf.expand_dims(box_b[:, 2:], 0), [A, B, 2]))
-    min_xy = tf.maximum(
-        tf.broadcast_to(tf.expand_dims(box_a[:, :2], 1), [A, B, 2]),
-        tf.broadcast_to(tf.expand_dims(box_b[:, :2], 0), [A, B, 2]))
-    inter = tf.maximum((max_xy - min_xy), tf.zeros_like(max_xy - min_xy))
-    return inter[:, :, 0] * inter[:, :, 1]
-
-
-def _jaccard(box_a, box_b):
-    """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
-    is simply the intersection over union of two boxes. Here we operate on
-    ground truth boxes and default boxes.
-    E.g.:
-        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
-    Args:
-        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
-        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
-    Return:
-        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
-    """
-    inter = _intersect(box_a, box_b)
-    area_a = tf.broadcast_to(
-        tf.expand_dims(
-            (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]), 1),
-        tf.shape(inter))  # [A,B]
-    area_b = tf.broadcast_to(
-        tf.expand_dims(
-            (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]), 0),
-        tf.shape(inter))  # [A,B]
-    union = area_a + area_b - inter
-    return inter / union  # [A,B]
-
-
-###############################################################################
-#   Tensorflow Decoding                                                       #
-###############################################################################
-def decode_tf(labels, priors, variances=[0.1, 0.2]):
-    """tensorflow decoding"""
-    bbox = _decode_bbox(labels[:, :4], priors, variances)
-    landm = _decode_landm(labels[:, 4:14], priors, variances)
-    landm_valid = labels[:, 14][:, tf.newaxis]
-    conf = labels[:, 15][:, tf.newaxis]
-
-    return tf.concat([bbox, landm, landm_valid, conf], axis=1)
-
-
-def _decode_bbox(pre, priors, variances=[0.1, 0.2]):
-    """Decode locations from predictions using priors to undo
-    the encoding we did for offset regression at train time.
-    Args:
-        pre (tensor): location predictions for loc layers,
-            Shape: [num_priors,4]
-        priors (tensor): Prior boxes in center-offset form.
-            Shape: [num_priors,4].
-        variances: (list[float]) Variances of priorboxes
-    Return:
-        decoded bounding box predictions
-    """
-    centers = priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:]
-    sides = priors[:, 2:] * tf.math.exp(pre[:, 2:] * variances[1])
-
-    return tf.concat([centers - sides / 2, centers + sides / 2], axis=1)
-
-
-def _decode_landm(pre, priors, variances=[0.1, 0.2]):
-    """Decode landm from predictions using priors to undo
-    the encoding we did for offset regression at train time.
-    Args:
-        pre (tensor): landm predictions for loc layers,
-            Shape: [num_priors,10]
-        priors (tensor): Prior boxes in center-offset form.
-            Shape: [num_priors,4].
-        variances: (list[float]) Variances of priorboxes
-    Return:
-        decoded landm predictions
-    """
-    landms = tf.concat(
-        [priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
-         priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
-         priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
-         priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
-         priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:]], axis=1)
-    return landms
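
For context, a minimal sketch of how these anchor utilities fit together, written against the module as it existed before this commit (importable as retinaface.anchor; after the move the import path will differ). The 640x640 input size is illustrative, and the min_sizes/steps values mirror the defaults hard-coded in retinaface/ops.py below.

import tensorflow as tf
from retinaface.anchor import prior_box, prior_box_tf, decode_tf  # pre-move path

image_sizes = (640, 640)                       # (height, width); illustrative
min_sizes = [[16, 32], [64, 128], [256, 512]]  # two anchor sizes per FPN level
steps = [8, 16, 32]                            # feature-map strides

# NumPy variant: one (cx, cy, w, h) row per anchor, normalized to [0, 1].
priors_np = prior_box(image_sizes, min_sizes, steps)
print(priors_np.shape)  # (16800, 4): 2 * (80*80 + 40*40 + 20*20) anchors

# Graph-friendly variant, used by models.py at inference time.
priors_tf = prior_box_tf(image_sizes, min_sizes, steps)

# decode_tf expects rows of [bbox(4), landm(10), landm_valid(1), conf(1)];
# all-zero regression offsets decode back to the priors themselves.
raw = tf.zeros([priors_np.shape[0], 16])
decoded = decode_tf(raw, priors_tf)
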
retinaface/models.py DELETED
@@ -1,301 +0,0 @@
-import tensorflow as tf
-from tensorflow.keras import Model
-from tensorflow.keras.applications import MobileNetV2, ResNet50
-from tensorflow.keras.layers import Input, Conv2D, ReLU, LeakyReLU
-from retinaface.anchor import decode_tf, prior_box_tf
-
-
-def _regularizer(weights_decay):
-    """l2 regularizer"""
-    return tf.keras.regularizers.l2(weights_decay)
-
-
-def _kernel_init(scale=1.0, seed=None):
-    """He normal initializer"""
-    return tf.keras.initializers.he_normal()
-
-
-class BatchNormalization(tf.keras.layers.BatchNormalization):
-    """Make trainable=False freeze BN for real (the og version is sad).
-       ref: https://github.com/zzh8829/yolov3-tf2
-    """
-    def __init__(self, axis=-1, momentum=0.9, epsilon=1e-5, center=True,
-                 scale=True, name=None, **kwargs):
-        super(BatchNormalization, self).__init__(
-            axis=axis, momentum=momentum, epsilon=epsilon, center=center,
-            scale=scale, name=name, **kwargs)
-
-    def call(self, x, training=False):
-        if training is None:
-            training = tf.constant(False)
-        training = tf.logical_and(training, self.trainable)
-
-        return super().call(x, training)
-
-
-def Backbone(backbone_type='ResNet50', use_pretrain=True):
-    """Backbone Model"""
-    weights = None
-    if use_pretrain:
-        weights = 'imagenet'
-
-    def backbone(x):
-        if backbone_type == 'ResNet50':
-            extractor = ResNet50(
-                input_shape=x.shape[1:], include_top=False, weights=weights)
-            pick_layer1 = 80  # [80, 80, 512]
-            pick_layer2 = 142  # [40, 40, 1024]
-            pick_layer3 = 174  # [20, 20, 2048]
-            preprocess = tf.keras.applications.resnet.preprocess_input
-        elif backbone_type == 'MobileNetV2':
-            extractor = MobileNetV2(
-                input_shape=x.shape[1:], include_top=False, weights=weights)
-            pick_layer1 = 54  # [80, 80, 32]
-            pick_layer2 = 116  # [40, 40, 96]
-            pick_layer3 = 143  # [20, 20, 160]
-            preprocess = tf.keras.applications.mobilenet_v2.preprocess_input
-        else:
-            raise NotImplementedError(
-                'Backbone type {} is not recognized.'.format(backbone_type))
-
-        return Model(extractor.input,
-                     (extractor.layers[pick_layer1].output,
-                      extractor.layers[pick_layer2].output,
-                      extractor.layers[pick_layer3].output),
-                     name=backbone_type + '_extrator')(preprocess(x))
-
-    return backbone
-
-
-class ConvUnit(tf.keras.layers.Layer):
-    """Conv + BN + Act"""
-    def __init__(self, f, k, s, wd, act=None, **kwargs):
-        super(ConvUnit, self).__init__(**kwargs)
-        self.conv = Conv2D(filters=f, kernel_size=k, strides=s, padding='same',
-                           kernel_initializer=_kernel_init(),
-                           kernel_regularizer=_regularizer(wd),
-                           use_bias=False)
-        self.bn = BatchNormalization()
-
-        if act is None:
-            self.act_fn = tf.identity
-        elif act == 'relu':
-            self.act_fn = ReLU()
-        elif act == 'lrelu':
-            self.act_fn = LeakyReLU(0.1)
-        else:
-            raise NotImplementedError(
-                'Activation function type {} is not recognized.'.format(act))
-
-    def call(self, x):
-        return self.act_fn(self.bn(self.conv(x)))
-
-
-class FPN(tf.keras.layers.Layer):
-    """Feature Pyramid Network"""
-    def __init__(self, out_ch, wd, **kwargs):
-        super(FPN, self).__init__(**kwargs)
-        act = 'relu'
-        self.out_ch = out_ch
-        self.wd = wd
-        if (out_ch <= 64):
-            act = 'lrelu'
-
-        self.output1 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
-        self.output2 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
-        self.output3 = ConvUnit(f=out_ch, k=1, s=1, wd=wd, act=act)
-        self.merge1 = ConvUnit(f=out_ch, k=3, s=1, wd=wd, act=act)
-        self.merge2 = ConvUnit(f=out_ch, k=3, s=1, wd=wd, act=act)
-
-    def call(self, x):
-        output1 = self.output1(x[0])  # [80, 80, out_ch]
-        output2 = self.output2(x[1])  # [40, 40, out_ch]
-        output3 = self.output3(x[2])  # [20, 20, out_ch]
-
-        up_h, up_w = tf.shape(output2)[1], tf.shape(output2)[2]
-        up3 = tf.image.resize(output3, [up_h, up_w], method='nearest')
-        output2 = output2 + up3
-        output2 = self.merge2(output2)
-
-        up_h, up_w = tf.shape(output1)[1], tf.shape(output1)[2]
-        up2 = tf.image.resize(output2, [up_h, up_w], method='nearest')
-        output1 = output1 + up2
-        output1 = self.merge1(output1)
-
-        return output1, output2, output3
-
-    def get_config(self):
-        config = {
-            'out_ch': self.out_ch,
-            'wd': self.wd,
-        }
-        base_config = super(FPN, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
-
-
-class SSH(tf.keras.layers.Layer):
-    """Single Stage Headless Layer"""
-    def __init__(self, out_ch, wd, **kwargs):
-        super(SSH, self).__init__(**kwargs)
-        assert out_ch % 4 == 0
-        self.out_ch = out_ch
-        self.wd = wd
-        act = 'relu'
-        if (out_ch <= 64):
-            act = 'lrelu'
-
-        self.conv_3x3 = ConvUnit(f=out_ch // 2, k=3, s=1, wd=wd, act=None)
-
-        self.conv_5x5_1 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=act)
-        self.conv_5x5_2 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=None)
-
-        self.conv_7x7_2 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=act)
-        self.conv_7x7_3 = ConvUnit(f=out_ch // 4, k=3, s=1, wd=wd, act=None)
-
-        self.relu = ReLU()
-
-    def call(self, x):
-        conv_3x3 = self.conv_3x3(x)
-
-        conv_5x5_1 = self.conv_5x5_1(x)
-        conv_5x5 = self.conv_5x5_2(conv_5x5_1)
-
-        conv_7x7_2 = self.conv_7x7_2(conv_5x5_1)
-        conv_7x7 = self.conv_7x7_3(conv_7x7_2)
-
-        output = tf.concat([conv_3x3, conv_5x5, conv_7x7], axis=3)
-        output = self.relu(output)
-
-        return output
-
-    def get_config(self):
-        config = {
-            'out_ch': self.out_ch,
-            'wd': self.wd,
-        }
-        base_config = super(SSH, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
-
-
-class BboxHead(tf.keras.layers.Layer):
-    """Bbox Head Layer"""
-    def __init__(self, num_anchor, wd, **kwargs):
-        super(BboxHead, self).__init__(**kwargs)
-        self.num_anchor = num_anchor
-        self.wd = wd
-        self.conv = Conv2D(filters=num_anchor * 4, kernel_size=1, strides=1)
-
-    def call(self, x):
-        h, w = tf.shape(x)[1], tf.shape(x)[2]
-        x = self.conv(x)
-
-        return tf.reshape(x, [-1, h * w * self.num_anchor, 4])
-
-    def get_config(self):
-        config = {
-            'num_anchor': self.num_anchor,
-            'wd': self.wd,
-        }
-        base_config = super(BboxHead, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
-
-
-class LandmarkHead(tf.keras.layers.Layer):
-    """Landmark Head Layer"""
-    def __init__(self, num_anchor, wd, name='LandmarkHead', **kwargs):
-        super(LandmarkHead, self).__init__(name=name, **kwargs)
-        self.num_anchor = num_anchor
-        self.wd = wd
-        self.conv = Conv2D(filters=num_anchor * 10, kernel_size=1, strides=1)
-
-    def call(self, x):
-        h, w = tf.shape(x)[1], tf.shape(x)[2]
-        x = self.conv(x)
-
-        return tf.reshape(x, [-1, h * w * self.num_anchor, 10])
-
-    def get_config(self):
-        config = {
-            'num_anchor': self.num_anchor,
-            'wd': self.wd,
-        }
-        base_config = super(LandmarkHead, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
-
-
-class ClassHead(tf.keras.layers.Layer):
-    """Class Head Layer"""
-    def __init__(self, num_anchor, wd, name='ClassHead', **kwargs):
-        super(ClassHead, self).__init__(name=name, **kwargs)
-        self.num_anchor = num_anchor
-        self.wd = wd
-        self.conv = Conv2D(filters=num_anchor * 2, kernel_size=1, strides=1)
-
-    def call(self, x):
-        h, w = tf.shape(x)[1], tf.shape(x)[2]
-        x = self.conv(x)
-
-        return tf.reshape(x, [-1, h * w * self.num_anchor, 2])
-
-    def get_config(self):
-        config = {
-            'num_anchor': self.num_anchor,
-            'wd': self.wd,
-        }
-        base_config = super(ClassHead, self).get_config()
-        return dict(list(base_config.items()) + list(config.items()))
-
-
-def RetinaFaceModel(cfg, training=False, iou_th=0.4, score_th=0.02,
-                    name='RetinaFaceModel'):
-    """Retina Face Model"""
-    input_size = cfg['input_size'] if training else None
-    wd = cfg['weights_decay']
-    out_ch = cfg['out_channel']
-    num_anchor = len(cfg['min_sizes'][0])
-    backbone_type = cfg['backbone_type']
-
-    # define model
-    x = inputs = Input([input_size, input_size, 3], name='input_image')
-
-    x = Backbone(backbone_type=backbone_type)(x)
-
-    fpn = FPN(out_ch=out_ch, wd=wd)(x)
-
-    features = [SSH(out_ch=out_ch, wd=wd)(f)
-                for i, f in enumerate(fpn)]
-
-    bbox_regressions = tf.concat(
-        [BboxHead(num_anchor, wd=wd)(f)
-         for i, f in enumerate(features)], axis=1)
-    landm_regressions = tf.concat(
-        [LandmarkHead(num_anchor, wd=wd, name=f'LandmarkHead_{i}')(f)
-         for i, f in enumerate(features)], axis=1)
-    classifications = tf.concat(
-        [ClassHead(num_anchor, wd=wd, name=f'ClassHead_{i}')(f)
-         for i, f in enumerate(features)], axis=1)
-
-    classifications = tf.keras.layers.Softmax(axis=-1)(classifications)
-
-    if training:
-        out = (bbox_regressions, landm_regressions, classifications)
-    else:
-        # only for batch size 1
-        preds = tf.concat(  # [bboxes, landms, landms_valid, conf]
-            [bbox_regressions[0],
-             landm_regressions[0],
-             tf.ones_like(classifications[0, :, 0][..., tf.newaxis]),
-             classifications[0, :, 1][..., tf.newaxis]], 1)
-        priors = prior_box_tf((tf.shape(inputs)[1], tf.shape(inputs)[2]), cfg['min_sizes'], cfg['steps'], cfg['clip'])
-        decode_preds = decode_tf(preds, priors, cfg['variances'])
-
-        selected_indices = tf.image.non_max_suppression(
-            boxes=decode_preds[:, :4],
-            scores=decode_preds[:, -1],
-            max_output_size=tf.shape(decode_preds)[0],
-            iou_threshold=iou_th,
-            score_threshold=score_th)
-
-        out = tf.gather(decode_preds, selected_indices)
-
-    return Model(inputs, out, name=name), Model(inputs, [bbox_regressions, landm_regressions, classifications], name=name + '_bb_only')
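
For context, a hedged sketch of instantiating the detector defined above, against the pre-move retinaface.models path. The cfg keys are exactly the ones RetinaFaceModel reads; the values shown are illustrative assumptions (typical of retinaface-tf2 configs), not necessarily the ones this app shipped with.

from retinaface.models import RetinaFaceModel  # pre-move import path

# cfg keys are the ones RetinaFaceModel reads; the values are assumptions.
cfg = {
    'input_size': 640,            # only consulted when training=True
    'weights_decay': 5e-4,
    'out_channel': 256,
    'min_sizes': [[16, 32], [64, 128], [256, 512]],  # 2 anchors per level
    'steps': [8, 16, 32],
    'clip': False,
    'variances': [0.1, 0.2],
    'backbone_type': 'ResNet50',  # or 'MobileNetV2'
}

# Returns (detector with decode + NMS baked in, raw-heads model). The decoded
# branch only supports batch size 1, per the comment in the code above.
detector, bb_only = RetinaFaceModel(cfg, training=False, iou_th=0.4)
# detector(image) -> [num_kept, 16] rows of [x1, y1, x2, y2, 10 landmark
# coordinates, landm_valid, score], all in normalized image coordinates.
# bb_only(image) -> the raw (bbox, landm, class) heads, which is exactly
# what extract_detections in retinaface/ops.py consumes.
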
retinaface/ops.py DELETED
@@ -1,27 +0,0 @@
-from retinaface.anchor import decode_tf, prior_box_tf
-import tensorflow as tf
-
-
-def extract_detections(bbox_regressions, landm_regressions, classifications, image_sizes, iou_th=0.4, score_th=0.02):
-    min_sizes = [[16, 32], [64, 128], [256, 512]]
-    steps = [8, 16, 32]
-    variances = [0.1, 0.2]
-    preds = tf.concat(  # [bboxes, landms, landms_valid, conf]
-        [bbox_regressions,
-         landm_regressions,
-         tf.ones_like(classifications[:, 0][..., tf.newaxis]),
-         classifications[:, 1][..., tf.newaxis]], 1)
-    priors = prior_box_tf(image_sizes, min_sizes, steps, False)
-    decode_preds = decode_tf(preds, priors, variances)
-
-    selected_indices = tf.image.non_max_suppression(
-        boxes=decode_preds[:, :4],
-        scores=decode_preds[:, -1],
-        max_output_size=tf.shape(decode_preds)[0],
-        iou_threshold=iou_th,
-        score_threshold=score_th)
-
-    out = tf.gather(decode_preds, selected_indices)
-
-    return out
-
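
Finally, a self-contained sketch of calling extract_detections, the op this commit relocates, using dummy head outputs shaped for a 640x640 input. The pre-move import path is shown and the tensors are placeholders, not real detector outputs.

import tensorflow as tf
from retinaface.ops import extract_detections  # pre-move import path

# Dummy raw head outputs for one 640x640 image:
# 16800 priors = 2 anchors over the 80x80, 40x40 and 20x20 grids.
num_priors = 16800
bbox_regressions = tf.zeros([num_priors, 4])
landm_regressions = tf.zeros([num_priors, 10])
classifications = tf.fill([num_priors, 2], 0.5)  # [background, face] scores

detections = extract_detections(
    bbox_regressions, landm_regressions, classifications,
    image_sizes=(640, 640), iou_th=0.4, score_th=0.02)
# Each kept row: [x1, y1, x2, y2, 10 landmark coords, landm_valid, score],
# in normalized image coordinates.
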