brxerq committed on
Commit b831807
1 Parent(s): cfc83ad

Upload 12 files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ shape_predictor_68_face_landmarks.dat filter=lfs diff=lfs merge=lfs -text
+ yolov4.weights filter=lfs diff=lfs merge=lfs -text
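
Both new binaries are tracked with Git LFS, so the repository stores small pointer files rather than the payloads (the pointer contents appear at the end of this diff). A minimal Python sketch for checking whether a checked-out file is a real payload or a leftover LFS pointer; the file names come from this commit, the helper itself is illustrative:

# A file that is still an LFS pointer starts with the spec line shown
# in the pointer diffs at the bottom of this commit.
def is_lfs_pointer(path: str) -> bool:
    with open(path, "rb") as f:
        return f.readline().startswith(b"version https://git-lfs.github.com/spec/v1")

for name in ("shape_predictor_68_face_landmarks.dat", "yolov4.weights"):
    print(name, "pointer:", is_lfs_pointer(name))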
Person/101231186_1701957441.jpg ADDED
Person/101231186_1701958029.jpg ADDED
Person/101231186_1701964291.jpg ADDED
Person/10123_1701964199.jpg ADDED
Person/123_1702026855.jpg ADDED
PreTrained_coco.names ADDED
@@ -0,0 +1,80 @@
+ person
+ bicycle
+ car
+ motorbike
+ aeroplane
+ bus
+ train
+ truck
+ boat
+ traffic light
+ fire hydrant
+ stop sign
+ parking meter
+ bench
+ bird
+ cat
+ dog
+ horse
+ sheep
+ cow
+ elephant
+ bear
+ zebra
+ giraffe
+ backpack
+ umbrella
+ handbag
+ tie
+ suitcase
+ frisbee
+ skis
+ snowboard
+ sports ball
+ kite
+ baseball bat
+ baseball glove
+ skateboard
+ surfboard
+ tennis racket
+ bottle
+ wine glass
+ cup
+ fork
+ knife
+ spoon
+ bowl
+ banana
+ apple
+ sandwich
+ orange
+ broccoli
+ carrot
+ hot dog
+ pizza
+ donut
+ cake
+ chair
+ sofa
+ pottedplant
+ bed
+ diningtable
+ toilet
+ tvmonitor
+ laptop
+ mouse
+ remote
+ keyboard
+ cell phone
+ microwave
+ oven
+ toaster
+ sink
+ refrigerator
+ book
+ clock
+ vase
+ scissors
+ teddy bear
+ hair drier
+ toothbrush
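
anti_spoofing.py below loads this file and matches detections against the 'cell phone' entry. A short sketch of the same loading step, handy for verifying that the class list parses to exactly 80 names; nothing here goes beyond what the committed code itself does:

# Load the COCO class names; their order must match the YOLO output indices.
with open("PreTrained_coco.names", "r") as f:
    classes = f.read().strip().split("\n")

assert len(classes) == 80
print(classes.index("cell phone"))  # the class the anti-spoofing check filters on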
PreTrained_yolov4.cfg ADDED
@@ -0,0 +1,1157 @@
+ [net]
+ batch=64
+ subdivisions=8
+ # Training
+ #width=512
+ #height=512
+ width=608
+ height=608
+ channels=3
+ momentum=0.949
+ decay=0.0005
+ angle=0
+ saturation = 1.5
+ exposure = 1.5
+ hue=.1
+
+ learning_rate=0.0013
+ burn_in=1000
+ max_batches = 500500
+ policy=steps
+ steps=400000,450000
+ scales=.1,.1
+
+ #cutmix=1
+ mosaic=1
+
+ #:104x104 54:52x52 85:26x26 104:13x13 for 416
+
+ [convolutional]
+ batch_normalize=1
+ filters=32
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ # Downsample
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=3
+ stride=2
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -2
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=32
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -1,-7
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ # Downsample
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=2
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -2
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=64
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -1,-10
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ # Downsample
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=2
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -2
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -1,-28
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ # Downsample
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=3
+ stride=2
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -2
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -1,-28
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ # Downsample
+
+ [convolutional]
+ batch_normalize=1
+ filters=1024
+ size=3
+ stride=2
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -2
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=3
+ stride=1
+ pad=1
+ activation=mish
+
+ [shortcut]
+ from=-3
+ activation=linear
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ [route]
+ layers = -1,-16
+
+ [convolutional]
+ batch_normalize=1
+ filters=1024
+ size=1
+ stride=1
+ pad=1
+ activation=mish
+
+ ##########################
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=1024
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ ### SPP ###
+ [maxpool]
+ stride=1
+ size=5
+
+ [route]
+ layers=-2
+
+ [maxpool]
+ stride=1
+ size=9
+
+ [route]
+ layers=-4
+
+ [maxpool]
+ stride=1
+ size=13
+
+ [route]
+ layers=-1,-3,-5,-6
+ ### End SPP ###
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=1024
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [upsample]
+ stride=2
+
+ [route]
+ layers = 85
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [route]
+ layers = -1, -3
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=512
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=512
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [upsample]
+ stride=2
+
+ [route]
+ layers = 54
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [route]
+ layers = -1, -3
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=256
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=256
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=128
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ ##########################
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=256
+ activation=leaky
+
+ [convolutional]
+ size=1
+ stride=1
+ pad=1
+ filters=255
+ activation=linear
+
+
+ [yolo]
+ mask = 0,1,2
+ anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+ classes=80
+ num=9
+ jitter=.3
+ ignore_thresh = .7
+ truth_thresh = 1
+ scale_x_y = 1.2
+ iou_thresh=0.213
+ cls_normalizer=1.0
+ iou_normalizer=0.07
+ iou_loss=ciou
+ nms_kind=greedynms
+ beta_nms=0.6
+ max_delta=5
+
+
+ [route]
+ layers = -4
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=2
+ pad=1
+ filters=256
+ activation=leaky
+
+ [route]
+ layers = -1, -16
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=512
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=512
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=256
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=512
+ activation=leaky
+
+ [convolutional]
+ size=1
+ stride=1
+ pad=1
+ filters=255
+ activation=linear
+
+
+ [yolo]
+ mask = 3,4,5
+ anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+ classes=80
+ num=9
+ jitter=.3
+ ignore_thresh = .7
+ truth_thresh = 1
+ scale_x_y = 1.1
+ iou_thresh=0.213
+ cls_normalizer=1.0
+ iou_normalizer=0.07
+ iou_loss=ciou
+ nms_kind=greedynms
+ beta_nms=0.6
+ max_delta=5
+
+
+ [route]
+ layers = -4
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=2
+ pad=1
+ filters=512
+ activation=leaky
+
+ [route]
+ layers = -1, -37
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=1024
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=1024
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ filters=512
+ size=1
+ stride=1
+ pad=1
+ activation=leaky
+
+ [convolutional]
+ batch_normalize=1
+ size=3
+ stride=1
+ pad=1
+ filters=1024
+ activation=leaky
+
+ [convolutional]
+ size=1
+ stride=1
+ pad=1
+ filters=255
+ activation=linear
+
+
+ [yolo]
+ mask = 6,7,8
+ anchors = 12, 16, 19, 36, 40, 28, 36, 75, 76, 55, 72, 146, 142, 110, 192, 243, 459, 401
+ classes=80
+ num=9
+ jitter=.3
+ ignore_thresh = .7
+ truth_thresh = 1
+ random=1
+ scale_x_y = 1.05
+ iou_thresh=0.213
+ cls_normalizer=1.0
+ iou_normalizer=0.07
+ iou_loss=ciou
+ nms_kind=greedynms
+ beta_nms=0.6
+ max_delta=5
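
This cfg pairs with the LFS-tracked yolov4.weights; anti_spoofing.py below loads the two through OpenCV's DNN module. A minimal standalone sketch of that loading step (the same cv2.dnn.readNet call the committed code uses; the print is just for inspection):

import cv2

# Pair the Darknet cfg above with the LFS-tracked weights file.
net = cv2.dnn.readNet("yolov4.weights", "PreTrained_yolov4.cfg")
print(net.getUnconnectedOutLayersNames())  # the three [yolo] output heads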
__pycache__/anti_spoofing.cpython-311.pyc ADDED
Binary file (13.2 kB).
 
anti_spoofing.py ADDED
@@ -0,0 +1,232 @@
+ # Import all the libraries
+ import cv2
+ import dlib
+ import numpy as np
+ import os
+ import time
+ import mediapipe as mp
+ from skimage import feature
+
+ # I'm setting up the face and hand detectors here.
+ class AntiSpoofingSystem:
+     def __init__(self):
+         self.detector = dlib.get_frontal_face_detector()
+         self.predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
+
+         # Here I initialize MediaPipe for hand gesture detection.
+         self.mp_hands = mp.solutions.hands
+         self.hands = self.mp_hands.Hands(static_image_mode=False, max_num_hands=1, min_detection_confidence=0.7)
+
+         # This opens the webcam; if you have a Jetson kit, change the device index from 0 to 1.
+         self.cap = cv2.VideoCapture(0)
+         self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
+         self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
+
+         # I create a directory to save the captured images if it doesn't exist.
+         self.save_directory = "Person"
+         if not os.path.exists(self.save_directory):
+             os.makedirs(self.save_directory)
+
+         # I am loading the pre-trained model to detect smartphones.
+         self.net_smartphone = cv2.dnn.readNet('yolov4.weights', 'PreTrained_yolov4.cfg')
+         with open('PreTrained_coco.names', 'r') as f:
+             self.classes_smartphone = f.read().strip().split('\n')
+
+         # Setting the eye-aspect-ratio threshold used to detect blinks.
+         self.EAR_THRESHOLD = 0.2
+         self.BLINK_CONSEC_FRAMES = 4
+
+         # Initializing variables that track eye states and blink counts.
+         self.left_eye_state = False
+         self.right_eye_state = False
+         self.left_blink_counter = 0
+         self.right_blink_counter = 0
+
+         # Variables to manage smartphone detection.
+         self.smartphone_detected = False
+         self.smartphone_detection_frame_interval = 10
+         self.frame_count = 0
+
+         # New attributes for student data
+         self.student_id = None
+         self.student_name = None
+
+     # Calculate the eye aspect ratio (EAR) used to detect blinks.
+     def calculate_ear(self, eye):
+         A = np.linalg.norm(eye[1] - eye[5])
+         B = np.linalg.norm(eye[2] - eye[4])
+         C = np.linalg.norm(eye[0] - eye[3])
+         return (A + B) / (2.0 * C)
+
+     # Analyze the texture of the face region (local binary patterns) to check for liveness.
+     def analyze_texture(self, face_region):
+         gray_face = cv2.cvtColor(face_region, cv2.COLOR_BGR2GRAY)
+         lbp = feature.local_binary_pattern(gray_face, P=8, R=1, method="uniform")
+         lbp_hist, _ = np.histogram(lbp.ravel(), bins=np.arange(0, 58), range=(0, 58))
+         lbp_hist = lbp_hist.astype("float")
+         lbp_hist /= (lbp_hist.sum() + 1e-5)
+         return np.sum(lbp_hist[:10]) > 0.3
+
+     # Detect a hand in the frame using MediaPipe.
+     def detect_hand_gesture(self, frame):
+         results = self.hands.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
+         return results.multi_hand_landmarks is not None
+
+     # Detect smartphones in the frame to prevent bypassing the system with a photo on a screen.
+     def detect_smartphone(self, frame):
+         if self.frame_count % self.smartphone_detection_frame_interval == 0:
+             blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (224, 224), swapRB=True, crop=False)
+             self.net_smartphone.setInput(blob)
+             output_layers_names = self.net_smartphone.getUnconnectedOutLayersNames()
+             detections = self.net_smartphone.forward(output_layers_names)
+
+             for detection in detections:
+                 for obj in detection:
+                     scores = obj[5:]
+                     class_id = np.argmax(scores)
+                     confidence = scores[class_id]
+                     if confidence > 0.3 and self.classes_smartphone[class_id] == 'cell phone':
+                         center_x = int(obj[0] * frame.shape[1])
+                         center_y = int(obj[1] * frame.shape[0])
+                         width = int(obj[2] * frame.shape[1])
+                         height = int(obj[3] * frame.shape[0])
+                         left = int(center_x - width / 2)
+                         top = int(center_y - height / 2)
+
+                         cv2.rectangle(frame, (left, top), (left + width, top + height), (0, 0, 255), 2)
+                         cv2.putText(frame, 'Smartphone Detected', (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
+
+                         self.smartphone_detected = True
+                         self.left_blink_counter = 0
+                         self.right_blink_counter = 0
+                         return
+
+         self.frame_count += 1
+         self.smartphone_detected = False
+
+     # Check whether the user blinked to confirm their presence.
+     def detect_blink(self, left_ear, right_ear):
+         if self.smartphone_detected:
+             self.left_eye_state = False
+             self.right_eye_state = False
+             self.left_blink_counter = 0
+             self.right_blink_counter = 0
+             return False
+
+         # Increment a blink counter when an eye closes and then reopens.
+         if left_ear < self.EAR_THRESHOLD:
+             if not self.left_eye_state:
+                 self.left_eye_state = True
+         else:
+             if self.left_eye_state:
+                 self.left_eye_state = False
+                 self.left_blink_counter += 1
+
+         if right_ear < self.EAR_THRESHOLD:
+             if not self.right_eye_state:
+                 self.right_eye_state = True
+         else:
+             if self.right_eye_state:
+                 self.right_eye_state = False
+                 self.right_blink_counter += 1
+
+         # Reset the counters once both eyes have registered a blink.
+         if self.left_blink_counter > 0 and self.right_blink_counter > 0:
+             self.left_blink_counter = 0
+             self.right_blink_counter = 0
+             return True
+         else:
+             return False
+
+     # Main loop to process the video feed.
+     def run(self, update_frame_callback=None):
+         blink_count = 0
+         hand_gesture_detected = False
+         image_captured = False
+         last_event_time = time.time()
+         event_timeout = 60
+         message_displayed = False
+
+         while True:
+             ret, frame = self.cap.read()
+             if not ret:
+                 break
+
+             # Detect smartphones in the frame.
+             self.detect_smartphone(frame)
+
+             # Display a warning if a smartphone is detected.
+             if self.smartphone_detected:
+                 cv2.putText(frame, "Mobile phone detected, can't record attendance", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+                 blink_count = 0
+
+             # Process each frame to detect faces, blinks, and hand gestures.
+             if not self.smartphone_detected:
+                 gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+                 faces = self.detector(gray)
+
+                 for face in faces:
+                     landmarks = self.predictor(gray, face)
+                     leftEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(36, 42)])
+                     rightEye = np.array([(landmarks.part(n).x, landmarks.part(n).y) for n in range(42, 48)])
+
+                     ear_left = self.calculate_ear(leftEye)
+                     ear_right = self.calculate_ear(rightEye)
+
+                     if self.detect_blink(ear_left, ear_right):
+                         blink_count += 1
+
+                     # Print the current blink count on the frame.
+                     cv2.putText(frame, f"Blink Count: {blink_count}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
+
+                     hand_gesture_detected = self.detect_hand_gesture(frame)
+
+                     # Indicate when a hand gesture is detected.
+                     if hand_gesture_detected:
+                         cv2.putText(frame, "Hand Gesture Detected", (10, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)
+
+                     (x, y, w, h) = (face.left(), face.top(), face.width(), face.height())
+                     expanded_region = frame[max(y - h // 2, 0):min(y + 3 * h // 2, frame.shape[0]),
+                                             max(x - w // 2, 0):min(x + 3 * w // 2, frame.shape[1])]
+
+                     # Check whether the conditions are met to capture the image.
+                     if blink_count >= 5 and hand_gesture_detected and self.analyze_texture(expanded_region) and not message_displayed:
+                         cv2.putText(frame, "Please hold still for 2 seconds...", (10, 150), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+                         cv2.imshow("Frame", frame)
+                         cv2.waitKey(1)
+                         time.sleep(2)
+                         message_displayed = True
+
+                     if message_displayed and not image_captured:
+                         timestamp = int(time.time())
+                         picture_name = f"{self.student_id}_{timestamp}.jpg"
+                         cv2.imwrite(os.path.join(self.save_directory, picture_name), expanded_region)
+                         image_captured = True
+
+             if update_frame_callback:
+                 update_frame_callback(frame)
+
+             cv2.imshow("Frame", frame)
+             if image_captured or (time.time() - last_event_time > event_timeout and not hand_gesture_detected):
+                 break
+             if cv2.waitKey(1) & 0xFF == ord('q'):
+                 break
+
+         self.cap.release()
+         cv2.destroyAllWindows()
+
+         # If the person is real and completed all the required checks, their attendance is marked;
+         # otherwise report that no real person was detected.
+         if image_captured:
+             print(f"Person detected. Face image captured and saved as {picture_name}.")
+         elif not hand_gesture_detected:
+             print("No real person detected")
+
+ if __name__ == "__main__":
+     anti_spoofing_system = AntiSpoofingSystem()
+     anti_spoofing_system.run()
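
calculate_ear implements the eye-aspect-ratio formula EAR = (|p2-p6| + |p3-p5|) / (2 |p1-p4|) over the six dlib eye landmarks. A small self-contained check with synthetic eye contours (coordinates made up purely for illustration) shows why 0.2 works as the blink threshold:

import numpy as np

# Hypothetical 6-point eye contours: one open, one nearly closed.
open_eye = np.array([(0, 3), (2, 5), (4, 5), (6, 3), (4, 1), (2, 1)], dtype=float)
closed_eye = np.array([(0, 3), (2, 3.3), (4, 3.3), (6, 3), (4, 2.7), (2, 2.7)])

def ear(eye):
    A = np.linalg.norm(eye[1] - eye[5])  # vertical distance p2-p6
    B = np.linalg.norm(eye[2] - eye[4])  # vertical distance p3-p5
    C = np.linalg.norm(eye[0] - eye[3])  # horizontal distance p1-p4
    return (A + B) / (2.0 * C)

print(ear(open_eye))    # ~0.67, well above EAR_THRESHOLD = 0.2
print(ear(closed_eye))  # ~0.10, below the threshold, so it registers as a blink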
app.py ADDED
@@ -0,0 +1,61 @@
+ import sys
+ import tkinter as tk
+ from tkinter import messagebox
+ from PIL import Image, ImageTk
+ import threading
+ import cv2
+ from anti_spoofing import AntiSpoofingSystem
+
+ class AntiSpoofingGUI:
+     def __init__(self, anti_spoofing_system):
+         self.anti_spoofing_system = anti_spoofing_system
+         self.window = tk.Tk()
+         self.window.title("Anti-Spoofing System")
+
+         self.student_id_label = tk.Label(self.window, text="Student ID:")
+         self.student_id_label.pack()
+         self.student_id_entry = tk.Entry(self.window)
+         self.student_id_entry.pack()
+
+         self.student_name_label = tk.Label(self.window, text="Student Name:")
+         self.student_name_label.pack()
+         self.student_name_entry = tk.Entry(self.window)
+         self.student_name_entry.pack()
+
+         self.start_button = tk.Button(self.window, text="Start", command=self.start_anti_spoofing)
+         self.start_button.pack()
+
+         self.image_label = tk.Label(self.window)
+         self.image_label.pack()
+
+         # Create a PhotoImage object to use for the video feed
+         self.photo = ImageTk.PhotoImage("RGB", (640, 480))
+
+     def start_anti_spoofing(self):
+         self.student_id = self.student_id_entry.get()
+         self.student_name = self.student_name_entry.get()
+
+         if not self.student_id or not self.student_name:
+             messagebox.showwarning("Warning", "Please enter both Student ID and Name")
+             return
+
+         threading.Thread(target=self.run_anti_spoofing, daemon=True).start()
+
+     def run_anti_spoofing(self):
+         self.anti_spoofing_system.student_id = self.student_id
+         self.anti_spoofing_system.student_name = self.student_name
+         self.anti_spoofing_system.run(self.update_frame)
+
+     def update_frame(self, frame):
+         cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
+         # Resize the 1280x720 capture frame to fit the 640x480 PhotoImage canvas.
+         self.photo.paste(Image.fromarray(cv2image).resize((640, 480)))
+         self.image_label.config(image=self.photo)
+         self.image_label.update_idletasks()
+
+     def run(self):
+         self.window.mainloop()
+
+ if __name__ == "__main__":
+     anti_spoofing_system = AntiSpoofingSystem()
+     gui = AntiSpoofingGUI(anti_spoofing_system)
+     gui.run()
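
One design note: update_frame is invoked from the worker thread started in start_anti_spoofing, while Tk widgets generally expect to be touched only from the main loop. A hypothetical variant that marshals the update onto the Tk thread with after(); this is a sketch of an alternative, not what the commit does:

# Hypothetical thread-safe variant of AntiSpoofingGUI.update_frame.
def update_frame(self, frame):
    def apply():
        cv2image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGBA)
        self.photo.paste(Image.fromarray(cv2image).resize((640, 480)))
        self.image_label.config(image=self.photo)
    self.window.after(0, apply)  # run the widget update on the Tk main loop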
shape_predictor_68_face_landmarks.dat ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbdc2cb80eb9aa7a758672cbfdda32ba6300efe9b6e6c7a299ff7e736b11b92f
+ size 99693937
yolov4.weights ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8a4f6c62188738d86dc6898d82724ec0964d0eb9d2ae0f0a9d53d65d108d562
+ size 257717640