conciomith committed
Commit
7974733
1 Parent(s): 66ca1ff

Upload retinaface_model.py

Files changed (1)
  1. retinaface_model.py +680 -0
retinaface_model.py ADDED
@@ -0,0 +1,680 @@
import tensorflow as tf
import gdown
from pathlib import Path
import os

tf_version = int(tf.__version__.split(".")[0])

if tf_version == 1:
    from keras.models import Model
    from keras.layers import Input, BatchNormalization, ZeroPadding2D, Conv2D, ReLU, MaxPool2D, Add, UpSampling2D, concatenate, Softmax
else:
    from tensorflow.keras.models import Model
    from tensorflow.keras.layers import Input, BatchNormalization, ZeroPadding2D, Conv2D, ReLU, MaxPool2D, Add, UpSampling2D, concatenate, Softmax

def load_weights(model):
    """Fetch the pre-trained RetinaFace weights if necessary and load them into the model."""

    home = str(Path.home())
    exact_file = home + '/.deepface/weights/retinaface.h5'
    #url = 'https://drive.google.com/file/d/1K3Eq2k1b9dpKkucZjPAiCCnNzfCMosK4'
    #url = 'https://drive.google.com/uc?id=1K3Eq2k1b9dpKkucZjPAiCCnNzfCMosK4'
    url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/retinaface.h5'

    #-----------------------------

    if not os.path.exists(home + "/.deepface"):
        os.mkdir(home + "/.deepface")
        print("Directory " + home + "/.deepface created")

    if not os.path.exists(home + "/.deepface/weights"):
        os.mkdir(home + "/.deepface/weights")
        print("Directory " + home + "/.deepface/weights created")

    #-----------------------------

    if not os.path.isfile(exact_file):
        print("retinaface.h5 will be downloaded from the url " + url)
        gdown.download(url, exact_file, quiet=False)

    #-----------------------------

    # gdown should have downloaded the pre-trained weights by now. If the file
    # still does not exist, raise an exception.
    if not os.path.isfile(exact_file):
        raise ValueError(
            "Pre-trained weights could not be loaded!"
            + " You might try to download the pre-trained weights from the url " + url
            + " and copy them to " + exact_file + " manually."
        )

    model.load_weights(exact_file)

    return model

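# The builder below reconstructs the RetinaFace graph layer by layer: a
# ResNet-50 backbone, a top-down feature pyramid with SSH context modules, and
# RPN-style heads that emit face/background probabilities, box regressions and
# five-point landmark regressions at strides 32, 16 and 8. load_weights()
# above then fills the graph with the pre-trained weights.
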
def build_model():

    data = Input(dtype=tf.float32, shape=(None, None, 3), name='data')

    # ResNet-50 stem: 7x7/2 convolution followed by 3x3/2 max pooling.
    bn_data = BatchNormalization(epsilon=1.9999999494757503e-05, name='bn_data', trainable=False)(data)
    conv0_pad = ZeroPadding2D(padding=(3, 3))(bn_data)
    conv0 = Conv2D(filters=64, kernel_size=(7, 7), name='conv0', strides=[2, 2], padding='VALID', use_bias=False)(conv0_pad)
    bn0 = BatchNormalization(epsilon=1.9999999494757503e-05, name='bn0', trainable=False)(conv0)
    relu0 = ReLU(name='relu0')(bn0)
    pooling0_pad = ZeroPadding2D(padding=(1, 1))(relu0)
    pooling0 = MaxPool2D((3, 3), (2, 2), padding='VALID', name='pooling0')(pooling0_pad)

    # Stage 1: three pre-activation bottleneck units, 256 output channels.
    stage1_unit1_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit1_bn1', trainable=False)(pooling0)
    stage1_unit1_relu1 = ReLU(name='stage1_unit1_relu1')(stage1_unit1_bn1)
    stage1_unit1_conv1 = Conv2D(filters=64, kernel_size=(1, 1), name='stage1_unit1_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit1_relu1)
    stage1_unit1_sc = Conv2D(filters=256, kernel_size=(1, 1), name='stage1_unit1_sc', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit1_relu1)
    stage1_unit1_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit1_bn2', trainable=False)(stage1_unit1_conv1)
    stage1_unit1_relu2 = ReLU(name='stage1_unit1_relu2')(stage1_unit1_bn2)
    stage1_unit1_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage1_unit1_relu2)
    stage1_unit1_conv2 = Conv2D(filters=64, kernel_size=(3, 3), name='stage1_unit1_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit1_conv2_pad)
    stage1_unit1_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit1_bn3', trainable=False)(stage1_unit1_conv2)
    stage1_unit1_relu3 = ReLU(name='stage1_unit1_relu3')(stage1_unit1_bn3)
    stage1_unit1_conv3 = Conv2D(filters=256, kernel_size=(1, 1), name='stage1_unit1_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit1_relu3)
    plus0_v1 = Add()([stage1_unit1_conv3, stage1_unit1_sc])

    stage1_unit2_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit2_bn1', trainable=False)(plus0_v1)
    stage1_unit2_relu1 = ReLU(name='stage1_unit2_relu1')(stage1_unit2_bn1)
    stage1_unit2_conv1 = Conv2D(filters=64, kernel_size=(1, 1), name='stage1_unit2_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit2_relu1)
    stage1_unit2_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit2_bn2', trainable=False)(stage1_unit2_conv1)
    stage1_unit2_relu2 = ReLU(name='stage1_unit2_relu2')(stage1_unit2_bn2)
    stage1_unit2_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage1_unit2_relu2)
    stage1_unit2_conv2 = Conv2D(filters=64, kernel_size=(3, 3), name='stage1_unit2_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit2_conv2_pad)
    stage1_unit2_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit2_bn3', trainable=False)(stage1_unit2_conv2)
    stage1_unit2_relu3 = ReLU(name='stage1_unit2_relu3')(stage1_unit2_bn3)
    stage1_unit2_conv3 = Conv2D(filters=256, kernel_size=(1, 1), name='stage1_unit2_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit2_relu3)
    plus1_v2 = Add()([stage1_unit2_conv3, plus0_v1])

    stage1_unit3_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit3_bn1', trainable=False)(plus1_v2)
    stage1_unit3_relu1 = ReLU(name='stage1_unit3_relu1')(stage1_unit3_bn1)
    stage1_unit3_conv1 = Conv2D(filters=64, kernel_size=(1, 1), name='stage1_unit3_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit3_relu1)
    stage1_unit3_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit3_bn2', trainable=False)(stage1_unit3_conv1)
    stage1_unit3_relu2 = ReLU(name='stage1_unit3_relu2')(stage1_unit3_bn2)
    stage1_unit3_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage1_unit3_relu2)
    stage1_unit3_conv2 = Conv2D(filters=64, kernel_size=(3, 3), name='stage1_unit3_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit3_conv2_pad)
    stage1_unit3_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage1_unit3_bn3', trainable=False)(stage1_unit3_conv2)
    stage1_unit3_relu3 = ReLU(name='stage1_unit3_relu3')(stage1_unit3_bn3)
    stage1_unit3_conv3 = Conv2D(filters=256, kernel_size=(1, 1), name='stage1_unit3_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage1_unit3_relu3)
    plus2 = Add()([stage1_unit3_conv3, plus1_v2])

    # Stage 2: four bottleneck units, 512 output channels; the first unit downsamples.
    stage2_unit1_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit1_bn1', trainable=False)(plus2)
    stage2_unit1_relu1 = ReLU(name='stage2_unit1_relu1')(stage2_unit1_bn1)
    stage2_unit1_conv1 = Conv2D(filters=128, kernel_size=(1, 1), name='stage2_unit1_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit1_relu1)
    stage2_unit1_sc = Conv2D(filters=512, kernel_size=(1, 1), name='stage2_unit1_sc', strides=[2, 2], padding='VALID', use_bias=False)(stage2_unit1_relu1)
    stage2_unit1_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit1_bn2', trainable=False)(stage2_unit1_conv1)
    stage2_unit1_relu2 = ReLU(name='stage2_unit1_relu2')(stage2_unit1_bn2)
    stage2_unit1_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage2_unit1_relu2)
    stage2_unit1_conv2 = Conv2D(filters=128, kernel_size=(3, 3), name='stage2_unit1_conv2', strides=[2, 2], padding='VALID', use_bias=False)(stage2_unit1_conv2_pad)
    stage2_unit1_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit1_bn3', trainable=False)(stage2_unit1_conv2)
    stage2_unit1_relu3 = ReLU(name='stage2_unit1_relu3')(stage2_unit1_bn3)
    stage2_unit1_conv3 = Conv2D(filters=512, kernel_size=(1, 1), name='stage2_unit1_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit1_relu3)
    plus3 = Add()([stage2_unit1_conv3, stage2_unit1_sc])

    stage2_unit2_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit2_bn1', trainable=False)(plus3)
    stage2_unit2_relu1 = ReLU(name='stage2_unit2_relu1')(stage2_unit2_bn1)
    stage2_unit2_conv1 = Conv2D(filters=128, kernel_size=(1, 1), name='stage2_unit2_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit2_relu1)
    stage2_unit2_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit2_bn2', trainable=False)(stage2_unit2_conv1)
    stage2_unit2_relu2 = ReLU(name='stage2_unit2_relu2')(stage2_unit2_bn2)
    stage2_unit2_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage2_unit2_relu2)
    stage2_unit2_conv2 = Conv2D(filters=128, kernel_size=(3, 3), name='stage2_unit2_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit2_conv2_pad)
    stage2_unit2_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit2_bn3', trainable=False)(stage2_unit2_conv2)
    stage2_unit2_relu3 = ReLU(name='stage2_unit2_relu3')(stage2_unit2_bn3)
    stage2_unit2_conv3 = Conv2D(filters=512, kernel_size=(1, 1), name='stage2_unit2_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit2_relu3)
    plus4 = Add()([stage2_unit2_conv3, plus3])

    stage2_unit3_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit3_bn1', trainable=False)(plus4)
    stage2_unit3_relu1 = ReLU(name='stage2_unit3_relu1')(stage2_unit3_bn1)
    stage2_unit3_conv1 = Conv2D(filters=128, kernel_size=(1, 1), name='stage2_unit3_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit3_relu1)
    stage2_unit3_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit3_bn2', trainable=False)(stage2_unit3_conv1)
    stage2_unit3_relu2 = ReLU(name='stage2_unit3_relu2')(stage2_unit3_bn2)
    stage2_unit3_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage2_unit3_relu2)
    stage2_unit3_conv2 = Conv2D(filters=128, kernel_size=(3, 3), name='stage2_unit3_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit3_conv2_pad)
    stage2_unit3_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit3_bn3', trainable=False)(stage2_unit3_conv2)
    stage2_unit3_relu3 = ReLU(name='stage2_unit3_relu3')(stage2_unit3_bn3)
    stage2_unit3_conv3 = Conv2D(filters=512, kernel_size=(1, 1), name='stage2_unit3_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit3_relu3)
    plus5 = Add()([stage2_unit3_conv3, plus4])

    stage2_unit4_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit4_bn1', trainable=False)(plus5)
    stage2_unit4_relu1 = ReLU(name='stage2_unit4_relu1')(stage2_unit4_bn1)
    stage2_unit4_conv1 = Conv2D(filters=128, kernel_size=(1, 1), name='stage2_unit4_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit4_relu1)
    stage2_unit4_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit4_bn2', trainable=False)(stage2_unit4_conv1)
    stage2_unit4_relu2 = ReLU(name='stage2_unit4_relu2')(stage2_unit4_bn2)
    stage2_unit4_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage2_unit4_relu2)
    stage2_unit4_conv2 = Conv2D(filters=128, kernel_size=(3, 3), name='stage2_unit4_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit4_conv2_pad)
    stage2_unit4_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage2_unit4_bn3', trainable=False)(stage2_unit4_conv2)
    stage2_unit4_relu3 = ReLU(name='stage2_unit4_relu3')(stage2_unit4_bn3)
    stage2_unit4_conv3 = Conv2D(filters=512, kernel_size=(1, 1), name='stage2_unit4_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage2_unit4_relu3)
    plus6 = Add()([stage2_unit4_conv3, plus5])

    # Stage 3: six bottleneck units, 1024 output channels; the stride-8 SSH
    # lateral branch (ssh_m1_red_conv) taps stage3_unit1_relu2.
    stage3_unit1_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit1_bn1', trainable=False)(plus6)
    stage3_unit1_relu1 = ReLU(name='stage3_unit1_relu1')(stage3_unit1_bn1)
    stage3_unit1_conv1 = Conv2D(filters=256, kernel_size=(1, 1), name='stage3_unit1_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit1_relu1)
    stage3_unit1_sc = Conv2D(filters=1024, kernel_size=(1, 1), name='stage3_unit1_sc', strides=[2, 2], padding='VALID', use_bias=False)(stage3_unit1_relu1)
    stage3_unit1_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit1_bn2', trainable=False)(stage3_unit1_conv1)
    stage3_unit1_relu2 = ReLU(name='stage3_unit1_relu2')(stage3_unit1_bn2)
    stage3_unit1_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage3_unit1_relu2)
    stage3_unit1_conv2 = Conv2D(filters=256, kernel_size=(3, 3), name='stage3_unit1_conv2', strides=[2, 2], padding='VALID', use_bias=False)(stage3_unit1_conv2_pad)
    ssh_m1_red_conv = Conv2D(filters=256, kernel_size=(1, 1), name='ssh_m1_red_conv', strides=[1, 1], padding='VALID', use_bias=True)(stage3_unit1_relu2)
    stage3_unit1_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit1_bn3', trainable=False)(stage3_unit1_conv2)
    ssh_m1_red_conv_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_red_conv_bn', trainable=False)(ssh_m1_red_conv)
    stage3_unit1_relu3 = ReLU(name='stage3_unit1_relu3')(stage3_unit1_bn3)
    ssh_m1_red_conv_relu = ReLU(name='ssh_m1_red_conv_relu')(ssh_m1_red_conv_bn)
    stage3_unit1_conv3 = Conv2D(filters=1024, kernel_size=(1, 1), name='stage3_unit1_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit1_relu3)
    plus7 = Add()([stage3_unit1_conv3, stage3_unit1_sc])

    stage3_unit2_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit2_bn1', trainable=False)(plus7)
    stage3_unit2_relu1 = ReLU(name='stage3_unit2_relu1')(stage3_unit2_bn1)
    stage3_unit2_conv1 = Conv2D(filters=256, kernel_size=(1, 1), name='stage3_unit2_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit2_relu1)
    stage3_unit2_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit2_bn2', trainable=False)(stage3_unit2_conv1)
    stage3_unit2_relu2 = ReLU(name='stage3_unit2_relu2')(stage3_unit2_bn2)
    stage3_unit2_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage3_unit2_relu2)
    stage3_unit2_conv2 = Conv2D(filters=256, kernel_size=(3, 3), name='stage3_unit2_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit2_conv2_pad)
    stage3_unit2_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit2_bn3', trainable=False)(stage3_unit2_conv2)
    stage3_unit2_relu3 = ReLU(name='stage3_unit2_relu3')(stage3_unit2_bn3)
    stage3_unit2_conv3 = Conv2D(filters=1024, kernel_size=(1, 1), name='stage3_unit2_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit2_relu3)
    plus8 = Add()([stage3_unit2_conv3, plus7])

    stage3_unit3_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit3_bn1', trainable=False)(plus8)
    stage3_unit3_relu1 = ReLU(name='stage3_unit3_relu1')(stage3_unit3_bn1)
    stage3_unit3_conv1 = Conv2D(filters=256, kernel_size=(1, 1), name='stage3_unit3_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit3_relu1)
    stage3_unit3_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit3_bn2', trainable=False)(stage3_unit3_conv1)
    stage3_unit3_relu2 = ReLU(name='stage3_unit3_relu2')(stage3_unit3_bn2)
    stage3_unit3_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage3_unit3_relu2)
    stage3_unit3_conv2 = Conv2D(filters=256, kernel_size=(3, 3), name='stage3_unit3_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit3_conv2_pad)
    stage3_unit3_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit3_bn3', trainable=False)(stage3_unit3_conv2)
    stage3_unit3_relu3 = ReLU(name='stage3_unit3_relu3')(stage3_unit3_bn3)
    stage3_unit3_conv3 = Conv2D(filters=1024, kernel_size=(1, 1), name='stage3_unit3_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit3_relu3)
    plus9 = Add()([stage3_unit3_conv3, plus8])

    stage3_unit4_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit4_bn1', trainable=False)(plus9)
    stage3_unit4_relu1 = ReLU(name='stage3_unit4_relu1')(stage3_unit4_bn1)
    stage3_unit4_conv1 = Conv2D(filters=256, kernel_size=(1, 1), name='stage3_unit4_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit4_relu1)
    stage3_unit4_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit4_bn2', trainable=False)(stage3_unit4_conv1)
    stage3_unit4_relu2 = ReLU(name='stage3_unit4_relu2')(stage3_unit4_bn2)
    stage3_unit4_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage3_unit4_relu2)
    stage3_unit4_conv2 = Conv2D(filters=256, kernel_size=(3, 3), name='stage3_unit4_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit4_conv2_pad)
    stage3_unit4_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit4_bn3', trainable=False)(stage3_unit4_conv2)
    stage3_unit4_relu3 = ReLU(name='stage3_unit4_relu3')(stage3_unit4_bn3)
    stage3_unit4_conv3 = Conv2D(filters=1024, kernel_size=(1, 1), name='stage3_unit4_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit4_relu3)
    plus10 = Add()([stage3_unit4_conv3, plus9])

    stage3_unit5_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit5_bn1', trainable=False)(plus10)
    stage3_unit5_relu1 = ReLU(name='stage3_unit5_relu1')(stage3_unit5_bn1)
    stage3_unit5_conv1 = Conv2D(filters=256, kernel_size=(1, 1), name='stage3_unit5_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit5_relu1)
    stage3_unit5_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit5_bn2', trainable=False)(stage3_unit5_conv1)
    stage3_unit5_relu2 = ReLU(name='stage3_unit5_relu2')(stage3_unit5_bn2)
    stage3_unit5_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage3_unit5_relu2)
    stage3_unit5_conv2 = Conv2D(filters=256, kernel_size=(3, 3), name='stage3_unit5_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit5_conv2_pad)
    stage3_unit5_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit5_bn3', trainable=False)(stage3_unit5_conv2)
    stage3_unit5_relu3 = ReLU(name='stage3_unit5_relu3')(stage3_unit5_bn3)
    stage3_unit5_conv3 = Conv2D(filters=1024, kernel_size=(1, 1), name='stage3_unit5_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit5_relu3)
    plus11 = Add()([stage3_unit5_conv3, plus10])

    stage3_unit6_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit6_bn1', trainable=False)(plus11)
    stage3_unit6_relu1 = ReLU(name='stage3_unit6_relu1')(stage3_unit6_bn1)
    stage3_unit6_conv1 = Conv2D(filters=256, kernel_size=(1, 1), name='stage3_unit6_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit6_relu1)
    stage3_unit6_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit6_bn2', trainable=False)(stage3_unit6_conv1)
    stage3_unit6_relu2 = ReLU(name='stage3_unit6_relu2')(stage3_unit6_bn2)
    stage3_unit6_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage3_unit6_relu2)
    stage3_unit6_conv2 = Conv2D(filters=256, kernel_size=(3, 3), name='stage3_unit6_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit6_conv2_pad)
    stage3_unit6_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage3_unit6_bn3', trainable=False)(stage3_unit6_conv2)
    stage3_unit6_relu3 = ReLU(name='stage3_unit6_relu3')(stage3_unit6_bn3)
    stage3_unit6_conv3 = Conv2D(filters=1024, kernel_size=(1, 1), name='stage3_unit6_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage3_unit6_relu3)
    plus12 = Add()([stage3_unit6_conv3, plus11])

    # Stage 4: three bottleneck units, 2048 output channels; the stride-16 SSH
    # lateral branch (ssh_c2_lateral) taps stage4_unit1_relu2.
    stage4_unit1_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit1_bn1', trainable=False)(plus12)
    stage4_unit1_relu1 = ReLU(name='stage4_unit1_relu1')(stage4_unit1_bn1)
    stage4_unit1_conv1 = Conv2D(filters=512, kernel_size=(1, 1), name='stage4_unit1_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage4_unit1_relu1)
    stage4_unit1_sc = Conv2D(filters=2048, kernel_size=(1, 1), name='stage4_unit1_sc', strides=[2, 2], padding='VALID', use_bias=False)(stage4_unit1_relu1)
    stage4_unit1_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit1_bn2', trainable=False)(stage4_unit1_conv1)
    stage4_unit1_relu2 = ReLU(name='stage4_unit1_relu2')(stage4_unit1_bn2)
    stage4_unit1_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage4_unit1_relu2)
    stage4_unit1_conv2 = Conv2D(filters=512, kernel_size=(3, 3), name='stage4_unit1_conv2', strides=[2, 2], padding='VALID', use_bias=False)(stage4_unit1_conv2_pad)
    ssh_c2_lateral = Conv2D(filters=256, kernel_size=(1, 1), name='ssh_c2_lateral', strides=[1, 1], padding='VALID', use_bias=True)(stage4_unit1_relu2)
    stage4_unit1_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit1_bn3', trainable=False)(stage4_unit1_conv2)
    ssh_c2_lateral_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_c2_lateral_bn', trainable=False)(ssh_c2_lateral)
    stage4_unit1_relu3 = ReLU(name='stage4_unit1_relu3')(stage4_unit1_bn3)
    ssh_c2_lateral_relu = ReLU(name='ssh_c2_lateral_relu')(ssh_c2_lateral_bn)
    stage4_unit1_conv3 = Conv2D(filters=2048, kernel_size=(1, 1), name='stage4_unit1_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage4_unit1_relu3)
    plus13 = Add()([stage4_unit1_conv3, stage4_unit1_sc])

    stage4_unit2_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit2_bn1', trainable=False)(plus13)
    stage4_unit2_relu1 = ReLU(name='stage4_unit2_relu1')(stage4_unit2_bn1)
    stage4_unit2_conv1 = Conv2D(filters=512, kernel_size=(1, 1), name='stage4_unit2_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage4_unit2_relu1)
    stage4_unit2_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit2_bn2', trainable=False)(stage4_unit2_conv1)
    stage4_unit2_relu2 = ReLU(name='stage4_unit2_relu2')(stage4_unit2_bn2)
    stage4_unit2_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage4_unit2_relu2)
    stage4_unit2_conv2 = Conv2D(filters=512, kernel_size=(3, 3), name='stage4_unit2_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage4_unit2_conv2_pad)
    stage4_unit2_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit2_bn3', trainable=False)(stage4_unit2_conv2)
    stage4_unit2_relu3 = ReLU(name='stage4_unit2_relu3')(stage4_unit2_bn3)
    stage4_unit2_conv3 = Conv2D(filters=2048, kernel_size=(1, 1), name='stage4_unit2_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage4_unit2_relu3)
    plus14 = Add()([stage4_unit2_conv3, plus13])

    stage4_unit3_bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit3_bn1', trainable=False)(plus14)
    stage4_unit3_relu1 = ReLU(name='stage4_unit3_relu1')(stage4_unit3_bn1)
    stage4_unit3_conv1 = Conv2D(filters=512, kernel_size=(1, 1), name='stage4_unit3_conv1', strides=[1, 1], padding='VALID', use_bias=False)(stage4_unit3_relu1)
    stage4_unit3_bn2 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit3_bn2', trainable=False)(stage4_unit3_conv1)
    stage4_unit3_relu2 = ReLU(name='stage4_unit3_relu2')(stage4_unit3_bn2)
    stage4_unit3_conv2_pad = ZeroPadding2D(padding=(1, 1))(stage4_unit3_relu2)
    stage4_unit3_conv2 = Conv2D(filters=512, kernel_size=(3, 3), name='stage4_unit3_conv2', strides=[1, 1], padding='VALID', use_bias=False)(stage4_unit3_conv2_pad)
    stage4_unit3_bn3 = BatchNormalization(epsilon=1.9999999494757503e-05, name='stage4_unit3_bn3', trainable=False)(stage4_unit3_conv2)
    stage4_unit3_relu3 = ReLU(name='stage4_unit3_relu3')(stage4_unit3_bn3)
    stage4_unit3_conv3 = Conv2D(filters=2048, kernel_size=(1, 1), name='stage4_unit3_conv3', strides=[1, 1], padding='VALID', use_bias=False)(stage4_unit3_relu3)
    plus15 = Add()([stage4_unit3_conv3, plus14])

    bn1 = BatchNormalization(epsilon=1.9999999494757503e-05, name='bn1', trainable=False)(plus15)
    relu1 = ReLU(name='relu1')(bn1)

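    # Top-down merges: each coarser map is reduced to 256 channels by a 1x1
    # lateral conv, upsampled 2x and added into the next finer level (stride
    # 32 -> 16 via ssh_c3_up, 16 -> 8 via ssh_m2_red_up). Because the input
    # size is arbitrary, the upsampled map can overshoot the lateral map by a
    # pixel, so it is centre-cropped with tf.slice first; e.g. with a
    # hypothetical 25x25 lateral map, a 13x13 map upsamples to 26x26 and a
    # 25x25 slice at offset (26 - 25) // 2 = 0 is taken before the Add.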
    # SSH detection module for stride 32 (ssh_m3), interleaved with the
    # top-down merges that feed the stride-16 and stride-8 branches.
    ssh_c3_lateral = Conv2D(filters=256, kernel_size=(1, 1), name='ssh_c3_lateral', strides=[1, 1], padding='VALID', use_bias=True)(relu1)
    ssh_c3_lateral_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_c3_lateral_bn', trainable=False)(ssh_c3_lateral)
    ssh_c3_lateral_relu = ReLU(name='ssh_c3_lateral_relu')(ssh_c3_lateral_bn)
    ssh_m3_det_conv1_pad = ZeroPadding2D(padding=(1, 1))(ssh_c3_lateral_relu)
    ssh_m3_det_conv1 = Conv2D(filters=256, kernel_size=(3, 3), name='ssh_m3_det_conv1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m3_det_conv1_pad)
    ssh_m3_det_context_conv1_pad = ZeroPadding2D(padding=(1, 1))(ssh_c3_lateral_relu)
    ssh_m3_det_context_conv1 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m3_det_context_conv1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m3_det_context_conv1_pad)
    ssh_c3_up = UpSampling2D(size=(2, 2), interpolation="nearest", name="ssh_c3_up")(ssh_c3_lateral_relu)
    ssh_m3_det_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_conv1_bn', trainable=False)(ssh_m3_det_conv1)
    ssh_m3_det_context_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_context_conv1_bn', trainable=False)(ssh_m3_det_context_conv1)

    # Centre-crop the upsampled stride-32 map to the stride-16 lateral map's size.
    x1_shape = tf.shape(ssh_c3_up)
    x2_shape = tf.shape(ssh_c2_lateral_relu)
    offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
    size = [-1, x2_shape[1], x2_shape[2], -1]
    crop0 = tf.slice(ssh_c3_up, offsets, size, "crop0")

    ssh_m3_det_context_conv1_relu = ReLU(name='ssh_m3_det_context_conv1_relu')(ssh_m3_det_context_conv1_bn)
    plus0_v2 = Add()([ssh_c2_lateral_relu, crop0])
    ssh_m3_det_context_conv2_pad = ZeroPadding2D(padding=(1, 1))(ssh_m3_det_context_conv1_relu)
    ssh_m3_det_context_conv2 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m3_det_context_conv2', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m3_det_context_conv2_pad)
    ssh_m3_det_context_conv3_1_pad = ZeroPadding2D(padding=(1, 1))(ssh_m3_det_context_conv1_relu)
    ssh_m3_det_context_conv3_1 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m3_det_context_conv3_1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m3_det_context_conv3_1_pad)
    ssh_c2_aggr_pad = ZeroPadding2D(padding=(1, 1))(plus0_v2)
    ssh_c2_aggr = Conv2D(filters=256, kernel_size=(3, 3), name='ssh_c2_aggr', strides=[1, 1], padding='VALID', use_bias=True)(ssh_c2_aggr_pad)
    ssh_m3_det_context_conv2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_context_conv2_bn', trainable=False)(ssh_m3_det_context_conv2)
    ssh_m3_det_context_conv3_1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_context_conv3_1_bn', trainable=False)(ssh_m3_det_context_conv3_1)
    ssh_c2_aggr_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_c2_aggr_bn', trainable=False)(ssh_c2_aggr)
    ssh_m3_det_context_conv3_1_relu = ReLU(name='ssh_m3_det_context_conv3_1_relu')(ssh_m3_det_context_conv3_1_bn)
    ssh_c2_aggr_relu = ReLU(name='ssh_c2_aggr_relu')(ssh_c2_aggr_bn)
    ssh_m3_det_context_conv3_2_pad = ZeroPadding2D(padding=(1, 1))(ssh_m3_det_context_conv3_1_relu)
    ssh_m3_det_context_conv3_2 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m3_det_context_conv3_2', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m3_det_context_conv3_2_pad)
    ssh_m2_det_conv1_pad = ZeroPadding2D(padding=(1, 1))(ssh_c2_aggr_relu)
    ssh_m2_det_conv1 = Conv2D(filters=256, kernel_size=(3, 3), name='ssh_m2_det_conv1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m2_det_conv1_pad)
    ssh_m2_det_context_conv1_pad = ZeroPadding2D(padding=(1, 1))(ssh_c2_aggr_relu)
    ssh_m2_det_context_conv1 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m2_det_context_conv1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m2_det_context_conv1_pad)
    ssh_m2_red_up = UpSampling2D(size=(2, 2), interpolation="nearest", name="ssh_m2_red_up")(ssh_c2_aggr_relu)
    ssh_m3_det_context_conv3_2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m3_det_context_conv3_2_bn', trainable=False)(ssh_m3_det_context_conv3_2)
    ssh_m2_det_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_conv1_bn', trainable=False)(ssh_m2_det_conv1)
    ssh_m2_det_context_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_context_conv1_bn', trainable=False)(ssh_m2_det_context_conv1)

    # Centre-crop the upsampled stride-16 map to the stride-8 lateral map's size.
    x1_shape = tf.shape(ssh_m2_red_up)
    x2_shape = tf.shape(ssh_m1_red_conv_relu)
    offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0]
    size = [-1, x2_shape[1], x2_shape[2], -1]
    crop1 = tf.slice(ssh_m2_red_up, offsets, size, "crop1")

    ssh_m3_det_concat = concatenate([ssh_m3_det_conv1_bn, ssh_m3_det_context_conv2_bn, ssh_m3_det_context_conv3_2_bn], 3, name='ssh_m3_det_concat')
    ssh_m2_det_context_conv1_relu = ReLU(name='ssh_m2_det_context_conv1_relu')(ssh_m2_det_context_conv1_bn)
    plus1_v1 = Add()([ssh_m1_red_conv_relu, crop1])
    ssh_m3_det_concat_relu = ReLU(name='ssh_m3_det_concat_relu')(ssh_m3_det_concat)
    ssh_m2_det_context_conv2_pad = ZeroPadding2D(padding=(1, 1))(ssh_m2_det_context_conv1_relu)
    ssh_m2_det_context_conv2 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m2_det_context_conv2', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m2_det_context_conv2_pad)
    ssh_m2_det_context_conv3_1_pad = ZeroPadding2D(padding=(1, 1))(ssh_m2_det_context_conv1_relu)
    ssh_m2_det_context_conv3_1 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m2_det_context_conv3_1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m2_det_context_conv3_1_pad)
    ssh_c1_aggr_pad = ZeroPadding2D(padding=(1, 1))(plus1_v1)
    ssh_c1_aggr = Conv2D(filters=256, kernel_size=(3, 3), name='ssh_c1_aggr', strides=[1, 1], padding='VALID', use_bias=True)(ssh_c1_aggr_pad)

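    # Detection heads. Each head predicts 2 anchors per cell: the score conv
    # has 4 channels (2 anchors x 2 classes), the bbox conv 8 (2 x 4 offsets)
    # and the landmark conv 20 (2 x 5 points x 2 coordinates). The
    # concatenate/stack/transpose blocks below mimic the channel-major
    # reshape of the original mxnet implementation so that Softmax normalises
    # each anchor's (background, face) pair, then restore the
    # (batch, h, w, 4) layout.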
    # Stride-32 head.
    face_rpn_cls_score_stride32 = Conv2D(filters=4, kernel_size=(1, 1), name='face_rpn_cls_score_stride32', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m3_det_concat_relu)
    inter_1 = concatenate([face_rpn_cls_score_stride32[:, :, :, 0], face_rpn_cls_score_stride32[:, :, :, 1]], axis=1)
    inter_2 = concatenate([face_rpn_cls_score_stride32[:, :, :, 2], face_rpn_cls_score_stride32[:, :, :, 3]], axis=1)
    final = tf.stack([inter_1, inter_2])
    face_rpn_cls_score_reshape_stride32 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_score_reshape_stride32")

    face_rpn_bbox_pred_stride32 = Conv2D(filters=8, kernel_size=(1, 1), name='face_rpn_bbox_pred_stride32', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m3_det_concat_relu)
    face_rpn_landmark_pred_stride32 = Conv2D(filters=20, kernel_size=(1, 1), name='face_rpn_landmark_pred_stride32', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m3_det_concat_relu)

    ssh_m2_det_context_conv2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_context_conv2_bn', trainable=False)(ssh_m2_det_context_conv2)
    ssh_m2_det_context_conv3_1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_context_conv3_1_bn', trainable=False)(ssh_m2_det_context_conv3_1)
    ssh_c1_aggr_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_c1_aggr_bn', trainable=False)(ssh_c1_aggr)
    ssh_m2_det_context_conv3_1_relu = ReLU(name='ssh_m2_det_context_conv3_1_relu')(ssh_m2_det_context_conv3_1_bn)
    ssh_c1_aggr_relu = ReLU(name='ssh_c1_aggr_relu')(ssh_c1_aggr_bn)

    face_rpn_cls_prob_stride32 = Softmax(name='face_rpn_cls_prob_stride32')(face_rpn_cls_score_reshape_stride32)
    input_shape = [tf.shape(face_rpn_cls_prob_stride32)[k] for k in range(4)]
    sz = tf.dtypes.cast(input_shape[1] / 2, dtype=tf.int32)
    inter_1 = face_rpn_cls_prob_stride32[:, 0:sz, :, 0]
    inter_2 = face_rpn_cls_prob_stride32[:, 0:sz, :, 1]
    inter_3 = face_rpn_cls_prob_stride32[:, sz:, :, 0]
    inter_4 = face_rpn_cls_prob_stride32[:, sz:, :, 1]
    final = tf.stack([inter_1, inter_3, inter_2, inter_4])
    face_rpn_cls_prob_reshape_stride32 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_prob_reshape_stride32")

    # SSH context branches for the stride-16 (ssh_m2) and stride-8 (ssh_m1) modules.
    ssh_m2_det_context_conv3_2_pad = ZeroPadding2D(padding=(1, 1))(ssh_m2_det_context_conv3_1_relu)
    ssh_m2_det_context_conv3_2 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m2_det_context_conv3_2', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m2_det_context_conv3_2_pad)
    ssh_m1_det_conv1_pad = ZeroPadding2D(padding=(1, 1))(ssh_c1_aggr_relu)
    ssh_m1_det_conv1 = Conv2D(filters=256, kernel_size=(3, 3), name='ssh_m1_det_conv1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m1_det_conv1_pad)
    ssh_m1_det_context_conv1_pad = ZeroPadding2D(padding=(1, 1))(ssh_c1_aggr_relu)
    ssh_m1_det_context_conv1 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m1_det_context_conv1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m1_det_context_conv1_pad)
    ssh_m2_det_context_conv3_2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m2_det_context_conv3_2_bn', trainable=False)(ssh_m2_det_context_conv3_2)
    ssh_m1_det_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_conv1_bn', trainable=False)(ssh_m1_det_conv1)
    ssh_m1_det_context_conv1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_context_conv1_bn', trainable=False)(ssh_m1_det_context_conv1)
    ssh_m2_det_concat = concatenate([ssh_m2_det_conv1_bn, ssh_m2_det_context_conv2_bn, ssh_m2_det_context_conv3_2_bn], 3, name='ssh_m2_det_concat')
    ssh_m1_det_context_conv1_relu = ReLU(name='ssh_m1_det_context_conv1_relu')(ssh_m1_det_context_conv1_bn)
    ssh_m2_det_concat_relu = ReLU(name='ssh_m2_det_concat_relu')(ssh_m2_det_concat)
    ssh_m1_det_context_conv2_pad = ZeroPadding2D(padding=(1, 1))(ssh_m1_det_context_conv1_relu)
    ssh_m1_det_context_conv2 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m1_det_context_conv2', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m1_det_context_conv2_pad)
    ssh_m1_det_context_conv3_1_pad = ZeroPadding2D(padding=(1, 1))(ssh_m1_det_context_conv1_relu)
    ssh_m1_det_context_conv3_1 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m1_det_context_conv3_1', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m1_det_context_conv3_1_pad)

    # Stride-16 head.
    face_rpn_cls_score_stride16 = Conv2D(filters=4, kernel_size=(1, 1), name='face_rpn_cls_score_stride16', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m2_det_concat_relu)
    inter_1 = concatenate([face_rpn_cls_score_stride16[:, :, :, 0], face_rpn_cls_score_stride16[:, :, :, 1]], axis=1)
    inter_2 = concatenate([face_rpn_cls_score_stride16[:, :, :, 2], face_rpn_cls_score_stride16[:, :, :, 3]], axis=1)
    final = tf.stack([inter_1, inter_2])
    face_rpn_cls_score_reshape_stride16 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_score_reshape_stride16")

    face_rpn_bbox_pred_stride16 = Conv2D(filters=8, kernel_size=(1, 1), name='face_rpn_bbox_pred_stride16', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m2_det_concat_relu)
    face_rpn_landmark_pred_stride16 = Conv2D(filters=20, kernel_size=(1, 1), name='face_rpn_landmark_pred_stride16', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m2_det_concat_relu)

    ssh_m1_det_context_conv2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_context_conv2_bn', trainable=False)(ssh_m1_det_context_conv2)
    ssh_m1_det_context_conv3_1_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_context_conv3_1_bn', trainable=False)(ssh_m1_det_context_conv3_1)
    ssh_m1_det_context_conv3_1_relu = ReLU(name='ssh_m1_det_context_conv3_1_relu')(ssh_m1_det_context_conv3_1_bn)

    face_rpn_cls_prob_stride16 = Softmax(name='face_rpn_cls_prob_stride16')(face_rpn_cls_score_reshape_stride16)
    input_shape = [tf.shape(face_rpn_cls_prob_stride16)[k] for k in range(4)]
    sz = tf.dtypes.cast(input_shape[1] / 2, dtype=tf.int32)
    inter_1 = face_rpn_cls_prob_stride16[:, 0:sz, :, 0]
    inter_2 = face_rpn_cls_prob_stride16[:, 0:sz, :, 1]
    inter_3 = face_rpn_cls_prob_stride16[:, sz:, :, 0]
    inter_4 = face_rpn_cls_prob_stride16[:, sz:, :, 1]
    final = tf.stack([inter_1, inter_3, inter_2, inter_4])
    face_rpn_cls_prob_reshape_stride16 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_prob_reshape_stride16")

    ssh_m1_det_context_conv3_2_pad = ZeroPadding2D(padding=(1, 1))(ssh_m1_det_context_conv3_1_relu)
    ssh_m1_det_context_conv3_2 = Conv2D(filters=128, kernel_size=(3, 3), name='ssh_m1_det_context_conv3_2', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m1_det_context_conv3_2_pad)
    ssh_m1_det_context_conv3_2_bn = BatchNormalization(epsilon=1.9999999494757503e-05, name='ssh_m1_det_context_conv3_2_bn', trainable=False)(ssh_m1_det_context_conv3_2)
    ssh_m1_det_concat = concatenate([ssh_m1_det_conv1_bn, ssh_m1_det_context_conv2_bn, ssh_m1_det_context_conv3_2_bn], 3, name='ssh_m1_det_concat')
    ssh_m1_det_concat_relu = ReLU(name='ssh_m1_det_concat_relu')(ssh_m1_det_concat)

    # Stride-8 head.
    face_rpn_cls_score_stride8 = Conv2D(filters=4, kernel_size=(1, 1), name='face_rpn_cls_score_stride8', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m1_det_concat_relu)
    inter_1 = concatenate([face_rpn_cls_score_stride8[:, :, :, 0], face_rpn_cls_score_stride8[:, :, :, 1]], axis=1)
    inter_2 = concatenate([face_rpn_cls_score_stride8[:, :, :, 2], face_rpn_cls_score_stride8[:, :, :, 3]], axis=1)
    final = tf.stack([inter_1, inter_2])
    face_rpn_cls_score_reshape_stride8 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_score_reshape_stride8")

    face_rpn_bbox_pred_stride8 = Conv2D(filters=8, kernel_size=(1, 1), name='face_rpn_bbox_pred_stride8', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m1_det_concat_relu)
    face_rpn_landmark_pred_stride8 = Conv2D(filters=20, kernel_size=(1, 1), name='face_rpn_landmark_pred_stride8', strides=[1, 1], padding='VALID', use_bias=True)(ssh_m1_det_concat_relu)

    face_rpn_cls_prob_stride8 = Softmax(name='face_rpn_cls_prob_stride8')(face_rpn_cls_score_reshape_stride8)
    input_shape = [tf.shape(face_rpn_cls_prob_stride8)[k] for k in range(4)]
    sz = tf.dtypes.cast(input_shape[1] / 2, dtype=tf.int32)
    inter_1 = face_rpn_cls_prob_stride8[:, 0:sz, :, 0]
    inter_2 = face_rpn_cls_prob_stride8[:, 0:sz, :, 1]
    inter_3 = face_rpn_cls_prob_stride8[:, sz:, :, 0]
    inter_4 = face_rpn_cls_prob_stride8[:, sz:, :, 1]
    final = tf.stack([inter_1, inter_3, inter_2, inter_4])
    face_rpn_cls_prob_reshape_stride8 = tf.transpose(final, (1, 2, 3, 0), name="face_rpn_cls_prob_reshape_stride8")

    # Nine outputs: (cls_prob, bbox_pred, landmark_pred) for strides 32, 16 and 8.
    model = Model(inputs=data,
                  outputs=[face_rpn_cls_prob_reshape_stride32,
                           face_rpn_bbox_pred_stride32,
                           face_rpn_landmark_pred_stride32,
                           face_rpn_cls_prob_reshape_stride16,
                           face_rpn_bbox_pred_stride16,
                           face_rpn_landmark_pred_stride16,
                           face_rpn_cls_prob_reshape_stride8,
                           face_rpn_bbox_pred_stride8,
                           face_rpn_landmark_pred_stride8])
    model = load_weights(model)

    return model
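
if __name__ == "__main__":
    # Minimal usage sketch, not part of the committed module's API: builds the
    # detector (downloading retinaface.h5 on first use) and runs one forward
    # pass on a random tensor standing in for a preprocessed image. Anchor
    # decoding and NMS live in deepface's RetinaFace wrapper, not in this file.
    import numpy as np

    net = build_model()
    dummy = np.random.rand(1, 640, 640, 3).astype(np.float32)
    outputs = net.predict(dummy)
    # Nine tensors: (cls_prob, bbox_pred, landmark_pred) for strides 32, 16, 8.
    for out in outputs:
        print(out.shape)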