yotamsapi committed on
Commit
2984b18
•
1 Parent(s): d9edcea

Create retinaface/anchor.py

Files changed (1)
  1. retinaface/anchor.py +296 -0
retinaface/anchor.py ADDED
@@ -0,0 +1,296 @@
+ """Anchor utils modified from https://github.com/biubug6/Pytorch_Retinaface"""
+ import math
+ import tensorflow as tf
+ import numpy as np
+ from itertools import product as product
+
+
+ ###############################################################################
+ #   Tensorflow / Numpy Priors                                                 #
+ ###############################################################################
+ def prior_box(image_sizes, min_sizes, steps, clip=False):
+     """prior box"""
+     feature_maps = [
+         [math.ceil(image_sizes[0] / step), math.ceil(image_sizes[1] / step)]
+         for step in steps]
+
+     anchors = []
+     for k, f in enumerate(feature_maps):
+         for i, j in product(range(f[0]), range(f[1])):
+             for min_size in min_sizes[k]:
+                 s_kx = min_size / image_sizes[1]
+                 s_ky = min_size / image_sizes[0]
+                 cx = (j + 0.5) * steps[k] / image_sizes[1]
+                 cy = (i + 0.5) * steps[k] / image_sizes[0]
+                 anchors += [cx, cy, s_kx, s_ky]
+
+     output = np.asarray(anchors).reshape([-1, 4])
+
+     if clip:
+         output = np.clip(output, 0, 1)
+
+     return output
+
+
+ def prior_box_tf(image_sizes, min_sizes, steps, clip=False):
+     """prior box"""
+     image_sizes = tf.cast(tf.convert_to_tensor(image_sizes), tf.float32)
+     feature_maps = tf.math.ceil(
+         tf.reshape(image_sizes, [1, 2]) /
+         tf.reshape(tf.cast(steps, tf.float32), [-1, 1]))
+
+     anchors = []
+     for k in range(len(min_sizes)):
+         grid_x, grid_y = _meshgrid_tf(tf.range(feature_maps[k][1]),
+                                       tf.range(feature_maps[k][0]))
+         cx = (grid_x + 0.5) * steps[k] / image_sizes[1]
+         cy = (grid_y + 0.5) * steps[k] / image_sizes[0]
+         cxcy = tf.stack([cx, cy], axis=-1)
+         cxcy = tf.reshape(cxcy, [-1, 2])
+         cxcy = tf.repeat(cxcy, repeats=tf.shape(min_sizes[k])[0], axis=0)
+
+         sx = min_sizes[k] / image_sizes[1]
+         sy = min_sizes[k] / image_sizes[0]
+         sxsy = tf.stack([sx, sy], 1)
+         sxsy = tf.repeat(sxsy[tf.newaxis],
+                          repeats=tf.shape(grid_x)[0] * tf.shape(grid_x)[1],
+                          axis=0)
+         sxsy = tf.reshape(sxsy, [-1, 2])
+
+         anchors.append(tf.concat([cxcy, sxsy], 1))
+
+     output = tf.concat(anchors, axis=0)
+
+     if clip:
+         output = tf.clip_by_value(output, 0, 1)
+
+     return output
+
+
+ def _meshgrid_tf(x, y):
+     """ workaround solution of the tf.meshgrid() issue:
+         https://github.com/tensorflow/tensorflow/issues/34470"""
+     grid_shape = [tf.shape(y)[0], tf.shape(x)[0]]
+     grid_x = tf.broadcast_to(tf.reshape(x, [1, -1]), grid_shape)
+     grid_y = tf.broadcast_to(tf.reshape(y, [-1, 1]), grid_shape)
+     return grid_x, grid_y
+
+
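For context, a minimal usage sketch of prior_box (not part of the committed file), assuming retinaface/ is importable as a package. The 640x640 input size and the min_sizes/steps values are illustrative RetinaFace-style settings, not constants defined in this file:

    from retinaface.anchor import prior_box

    # Hypothetical settings: three feature maps with two anchor sizes each.
    priors = prior_box(image_sizes=(640, 640),
                       min_sizes=[[16, 32], [64, 128], [256, 512]],
                       steps=[8, 16, 32],
                       clip=False)
    # Each row is [cx, cy, width, height] normalized to [0, 1].
    print(priors.shape)  # (16800, 4): 80*80*2 + 40*40*2 + 20*20*2 anchors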
+ ###############################################################################
+ #   Tensorflow Encoding                                                       #
+ ###############################################################################
+ def encode_tf(labels, priors, match_thresh, ignore_thresh,
+               variances=[0.1, 0.2]):
+     """tensorflow encoding"""
+     assert ignore_thresh <= match_thresh
+     priors = tf.cast(priors, tf.float32)
+     bbox = labels[:, :4]
+     landm = labels[:, 4:-1]
+     landm_valid = labels[:, -1]  # 1: with landm, 0: w/o landm.
+
+     # jaccard index
+     overlaps = _jaccard(bbox, _point_form(priors))
+
+     # (Bipartite Matching)
+     # [num_objects] best prior for each ground truth
+     best_prior_overlap, best_prior_idx = tf.math.top_k(overlaps, k=1)
+     best_prior_overlap = best_prior_overlap[:, 0]
+     best_prior_idx = best_prior_idx[:, 0]
+
+     # [num_priors] best ground truth for each prior
+     overlaps_t = tf.transpose(overlaps)
+     best_truth_overlap, best_truth_idx = tf.math.top_k(overlaps_t, k=1)
+     best_truth_overlap = best_truth_overlap[:, 0]
+     best_truth_idx = best_truth_idx[:, 0]
+
+     # ensure best prior
+     def _loop_body(i, bt_idx, bt_overlap):
+         bp_mask = tf.one_hot(best_prior_idx[i], tf.shape(bt_idx)[0])
+         bp_mask_int = tf.cast(bp_mask, tf.int32)
+         new_bt_idx = bt_idx * (1 - bp_mask_int) + bp_mask_int * i
+         bp_mask_float = tf.cast(bp_mask, tf.float32)
+         new_bt_overlap = bt_overlap * (1 - bp_mask_float) + bp_mask_float * 2
+         return tf.cond(best_prior_overlap[i] > match_thresh,
+                        lambda: (i + 1, new_bt_idx, new_bt_overlap),
+                        lambda: (i + 1, bt_idx, bt_overlap))
+     _, best_truth_idx, best_truth_overlap = tf.while_loop(
+         lambda i, bt_idx, bt_overlap: tf.less(i, tf.shape(best_prior_idx)[0]),
+         _loop_body, [tf.constant(0), best_truth_idx, best_truth_overlap])
+
+     matches_bbox = tf.gather(bbox, best_truth_idx)  # [num_priors, 4]
+     matches_landm = tf.gather(landm, best_truth_idx)  # [num_priors, 10]
+     matches_landm_v = tf.gather(landm_valid, best_truth_idx)  # [num_priors]
+
+     loc_t = _encode_bbox(matches_bbox, priors, variances)
+     landm_t = _encode_landm(matches_landm, priors, variances)
+     landm_valid_t = tf.cast(matches_landm_v > 0, tf.float32)
+     conf_t = tf.cast(best_truth_overlap > match_thresh, tf.float32)
+     conf_t = tf.where(
+         tf.logical_and(best_truth_overlap < match_thresh,
+                        best_truth_overlap > ignore_thresh),
+         tf.ones_like(conf_t) * -1, conf_t)  # 1: pos, 0: neg, -1: ignore
+
+     return tf.concat([loc_t, landm_t, landm_valid_t[..., tf.newaxis],
+                       conf_t[..., tf.newaxis]], axis=1)
+
+
+ def _encode_bbox(matched, priors, variances):
+     """Encode the variances from the priorbox layers into the ground truth
+     boxes we have matched (based on jaccard overlap) with the prior boxes.
+     Args:
+         matched: (tensor) Coords of ground truth for each prior in point-form
+             Shape: [num_priors, 4].
+         priors: (tensor) Prior boxes in center-offset form
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         encoded boxes (tensor), Shape: [num_priors, 4]
+     """
+
+     # dist b/t match center and prior's center
+     g_cxcy = (matched[:, :2] + matched[:, 2:]) / 2 - priors[:, :2]
+     # encode variance
+     g_cxcy /= (variances[0] * priors[:, 2:])
+     # match wh / prior wh
+     g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]
+     g_wh = tf.math.log(g_wh) / variances[1]
+     # return target for smooth_l1_loss
+     return tf.concat([g_cxcy, g_wh], 1)  # [num_priors,4]
+
+
+ def _encode_landm(matched, priors, variances):
+     """Encode the variances from the priorbox layers into the ground truth
+     boxes we have matched (based on jaccard overlap) with the prior boxes.
+     Args:
+         matched: (tensor) Coords of ground truth for each prior in point-form
+             Shape: [num_priors, 10].
+         priors: (tensor) Prior boxes in center-offset form
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         encoded landm (tensor), Shape: [num_priors, 10]
+     """
+
+     # dist b/t match center and prior's center
+     matched = tf.reshape(matched, [tf.shape(matched)[0], 5, 2])
+     priors = tf.broadcast_to(
+         tf.expand_dims(priors, 1), [tf.shape(matched)[0], 5, 4])
+     g_cxcy = matched[:, :, :2] - priors[:, :, :2]
+     # encode variance
+     g_cxcy /= (variances[0] * priors[:, :, 2:])
+     # g_cxcy /= priors[:, :, 2:]
+     g_cxcy = tf.reshape(g_cxcy, [tf.shape(g_cxcy)[0], -1])
+     # return target for smooth_l1_loss
+     return g_cxcy
+
+
+ def _point_form(boxes):
+     """ Convert prior_boxes to (xmin, ymin, xmax, ymax)
+     representation for comparison to point form ground truth data.
+     Args:
+         boxes: (tensor) center-size default boxes from priorbox layers.
+     Return:
+         boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.
+     """
+     return tf.concat((boxes[:, :2] - boxes[:, 2:] / 2,
+                       boxes[:, :2] + boxes[:, 2:] / 2), axis=1)
+
+
+ def _intersect(box_a, box_b):
+     """ We resize both tensors to [A,B,2]:
+         [A,2] -> [A,1,2] -> [A,B,2]
+         [B,2] -> [1,B,2] -> [A,B,2]
+     Then we compute the area of intersect between box_a and box_b.
+     Args:
+         box_a: (tensor) bounding boxes, Shape: [A,4].
+         box_b: (tensor) bounding boxes, Shape: [B,4].
+     Return:
+         (tensor) intersection area, Shape: [A,B].
+     """
+     A = tf.shape(box_a)[0]
+     B = tf.shape(box_b)[0]
+     max_xy = tf.minimum(
+         tf.broadcast_to(tf.expand_dims(box_a[:, 2:], 1), [A, B, 2]),
+         tf.broadcast_to(tf.expand_dims(box_b[:, 2:], 0), [A, B, 2]))
+     min_xy = tf.maximum(
+         tf.broadcast_to(tf.expand_dims(box_a[:, :2], 1), [A, B, 2]),
+         tf.broadcast_to(tf.expand_dims(box_b[:, :2], 0), [A, B, 2]))
+     inter = tf.maximum((max_xy - min_xy), tf.zeros_like(max_xy - min_xy))
+     return inter[:, :, 0] * inter[:, :, 1]
+
+
+ def _jaccard(box_a, box_b):
+     """Compute the jaccard overlap of two sets of boxes. The jaccard overlap
+     is simply the intersection over union of two boxes. Here we operate on
+     ground truth boxes and default boxes.
+     E.g.:
+         A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
+     Args:
+         box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
+         box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
+     Return:
+         jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
+     """
+     inter = _intersect(box_a, box_b)
+     area_a = tf.broadcast_to(
+         tf.expand_dims(
+             (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1]), 1),
+         tf.shape(inter))  # [A,B]
+     area_b = tf.broadcast_to(
+         tf.expand_dims(
+             (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1]), 0),
+         tf.shape(inter))  # [A,B]
+     union = area_a + area_b - inter
+     return inter / union  # [A,B]
+
+
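Similarly, a minimal sketch of building per-prior training targets with encode_tf (not part of the committed file). The thresholds and the single ground-truth row are illustrative assumptions; labels are expected as [xmin, ymin, xmax, ymax, ten landmark coordinates, landmark-valid flag] in normalized [0, 1] coordinates:

    import tensorflow as tf
    from retinaface.anchor import prior_box, encode_tf

    priors = prior_box((640, 640),
                       min_sizes=[[16, 32], [64, 128], [256, 512]],
                       steps=[8, 16, 32])
    # One face: box, five landmarks (x, y pairs), landmark-valid flag.
    labels = tf.constant([[0.10, 0.10, 0.30, 0.40,
                           0.14, 0.18, 0.26, 0.18, 0.20, 0.24,
                           0.15, 0.30, 0.25, 0.30,
                           1.0]])
    targets = encode_tf(labels, priors, match_thresh=0.45, ignore_thresh=0.3)
    print(targets.shape)  # (16800, 16): loc(4) + landm(10) + valid(1) + conf(1)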
+ ###############################################################################
+ #   Tensorflow Decoding                                                       #
+ ###############################################################################
+ def decode_tf(labels, priors, variances=[0.1, 0.2]):
+     """tensorflow decoding"""
+     bbox = _decode_bbox(labels[:, :4], priors, variances)
+     landm = _decode_landm(labels[:, 4:14], priors, variances)
+     landm_valid = labels[:, 14][:, tf.newaxis]
+     conf = labels[:, 15][:, tf.newaxis]
+
+     return tf.concat([bbox, landm, landm_valid, conf], axis=1)
+
+
+ def _decode_bbox(pre, priors, variances=[0.1, 0.2]):
+     """Decode locations from predictions using priors to undo
+     the encoding we did for offset regression at train time.
+     Args:
+         pre (tensor): location predictions for loc layers,
+             Shape: [num_priors,4]
+         priors (tensor): Prior boxes in center-offset form.
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         decoded bounding box predictions
+     """
+     centers = priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:]
+     sides = priors[:, 2:] * tf.math.exp(pre[:, 2:] * variances[1])
+
+     return tf.concat([centers - sides / 2, centers + sides / 2], axis=1)
+
+
+ def _decode_landm(pre, priors, variances=[0.1, 0.2]):
+     """Decode landm from predictions using priors to undo
+     the encoding we did for offset regression at train time.
+     Args:
+         pre (tensor): landm predictions for loc layers,
+             Shape: [num_priors,10]
+         priors (tensor): Prior boxes in center-offset form.
+             Shape: [num_priors,4].
+         variances: (list[float]) Variances of priorboxes
+     Return:
+         decoded landm predictions
+     """
+     landms = tf.concat(
+         [priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
+          priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
+          priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
+          priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
+          priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:]], axis=1)
+     return landms
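Finally, a minimal decoding sketch (not part of the committed file), in which random values stand in for a real model's [num_priors, 16] output of loc(4) + landm(10) + valid(1) + conf(1):

    import tensorflow as tf
    from retinaface.anchor import prior_box, decode_tf

    priors = tf.cast(prior_box((640, 640),
                               min_sizes=[[16, 32], [64, 128], [256, 512]],
                               steps=[8, 16, 32]), tf.float32)
    predictions = tf.random.normal([priors.shape[0], 16])  # placeholder outputs
    decoded = decode_tf(predictions, priors)
    boxes, landms, valid, conf = tf.split(decoded, [4, 10, 1, 1], axis=1)
    print(boxes.shape, landms.shape)  # (16800, 4) (16800, 10)

In practice the confidence column would be thresholded and non-maximum suppression applied before the decoded boxes and landmarks are used.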