ozyman committed
Commit 03d287b • 1 Parent(s): 27420be

added dropdown, made subdir

DeePixBiS.pth DELETED
@@ -1,3 +0,0 @@
-version https://git-lfs.github.com/spec/v1
-oid sha256:ec25c8b21634d42ae47f030f16a3d1501111ad0b24abeec0a01ce2d36b25dbee
-size 12968981
{Classifiers → DeePixBiS/Classifiers}/haarface.xml RENAMED
File without changes
Dataset.py → DeePixBiS/Dataset.py RENAMED
File without changes
Loss.py → DeePixBiS/Loss.py RENAMED
File without changes
Metrics.py → DeePixBiS/Metrics.py RENAMED
File without changes
Model.py → DeePixBiS/Model.py RENAMED
File without changes
Test.py → DeePixBiS/Test.py RENAMED
@@ -3,9 +3,9 @@ import torch
 import torch.nn as nn
 from torchvision import transforms
 import numpy as np
-from Model import DeePixBiS
-from Loss import PixWiseBCELoss
-from Metrics import predict, test_accuracy, test_loss
+from DeePixBiS.Model import DeePixBiS
+from DeePixBiS.Loss import PixWiseBCELoss
+from DeePixBiS.Metrics import predict, test_accuracy, test_loss
 
 model = DeePixBiS(pretrained=False)
 model.load_state_dict(torch.load('./DeePixBiS.pth'))
Train.py → DeePixBiS/Train.py RENAMED
@@ -3,11 +3,11 @@ import torch.nn as nn
 from torchvision.transforms import Compose, ToTensor, RandomHorizontalFlip, Normalize, Resize, RandomRotation
 import numpy as np
 from torch.utils.data import DataLoader
-from Dataset import PixWiseDataset
-from Model import DeePixBiS
-from Loss import PixWiseBCELoss
-from Metrics import predict, test_accuracy, test_loss
-from Trainer import Trainer
+from DeePixBiS.Dataset import PixWiseDataset
+from DeePixBiS.Model import DeePixBiS
+from DeePixBiS.Loss import PixWiseBCELoss
+from DeePixBiS.Metrics import predict, test_accuracy, test_loss
+from DeePixBiS.Trainer import Trainer
 
 model = DeePixBiS()
 model.load_state_dict(torch.load('./DeePixBiS.pth'))
Trainer.py → DeePixBiS/Trainer.py RENAMED
@@ -1,6 +1,6 @@
 import torch
 import torch.nn as nn
-from Metrics import test_accuracy, test_loss
+from DeePixBiS.Metrics import test_accuracy, test_loss
 
 
 class Trainer():
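Note that Test.py, Train.py and Trainer.py now import through the DeePixBiS package instead of as sibling modules, so they have to be run with the repository root on the import path. A minimal sketch of the assumption this makes (not part of the commit):

# Assumes the working directory is the repository root, so that the
# DeePixBiS/ directory resolves as a package (e.g. python -m DeePixBiS.Test).
import sys
sys.path.insert(0, '.')                  # put the repo root on sys.path
from DeePixBiS.Model import DeePixBiS    # package-qualified import, as in app.py
model = DeePixBiS(pretrained=False)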
test_data.csv → DeePixBiS/test_data.csv RENAMED
File without changes
train_data.csv → DeePixBiS/train_data.csv RENAMED
File without changes
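Taken together, the deletion and renames above appear to leave the repository organized roughly like this (a sketch inferred only from the file moves in this commit; the DeePixBiS.pth weights are deleted from the root here and, judging by the new load path in app.py below, presumably live under DeePixBiS/ instead):

    DeePixBiS/
        Classifiers/haarface.xml
        Dataset.py
        Loss.py
        Metrics.py
        Model.py
        Test.py
        Train.py
        Trainer.py
        test_data.csv
        train_data.csv
    app.py
    examples/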
app.py CHANGED
@@ -1,17 +1,20 @@
 import gradio as gr
+from gradio.components import Dropdown
 import cv2 as cv
 import torch
 from torchvision import transforms
-from Model import DeePixBiS
+from DeePixBiS.Model import DeePixBiS
 
 
 labels = ['Live', 'Spoof']
 thresh = 0.45
 examples = [
-    'examples/1_1_21_2_33_scene_fake.jpg', 'examples/frame150_real.jpg',
-    'examples/1_2.avi_125_real.jpg', 'examples/1_3.avi_25_fake.jpg']
+    ['examples/1_1_21_2_33_scene_fake.jpg', "DeePixBiS"],
+    ['examples/frame150_real.jpg', "DeePixBiS"],
+    ['examples/1_2.avi_125_real.jpg', "DeePixBiS"],
+    ['examples/1_3.avi_25_fake.jpg', "DeePixBiS"]]
 device = torch.device("cpu")
-faceClassifier = cv.CascadeClassifier('Classifiers/haarface.xml')
+faceClassifier = cv.CascadeClassifier('./DeePixBiS/Classifiers/haarface.xml')
 tfms = transforms.Compose([
     transforms.ToPILImage(),
     transforms.Resize((224, 224)),
@@ -19,7 +22,7 @@ tfms = transforms.Compose([
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
 ])
 model = DeePixBiS(pretrained=False)
-model.load_state_dict(torch.load('./DeePixBiS.pth'))
+model.load_state_dict(torch.load('./DeePixBiS/DeePixBiS.pth'))
 model.eval()
 
 
@@ -35,39 +38,42 @@ def find_largest_face(faces):
     return largest_face
 
 
-def inference(img):
-    grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
-    faces = faceClassifier.detectMultiScale(
-        grey, scaleFactor=1.1, minNeighbors=4)
-    face = find_largest_face(faces)
+def inference(img, model_name):
     confidences = {}
-    if face is not None:
-        x, y, w, h = face
-        faceRegion = img[y:y + h, x:x + w]
-        faceRegion = cv.cvtColor(faceRegion, cv.COLOR_BGR2RGB)
-        faceRegion = tfms(faceRegion)
-        faceRegion = faceRegion.unsqueeze(0)
-        mask, binary = model.forward(faceRegion)
-        res = torch.mean(mask).item()
-        if res < thresh:
-            cls = 'Spoof'
-            color = (0, 0, 255)
-            res = 1 - res
-        else:
-            cls = 'Real'
-            color = (0, 255, 0)
-        label = f'{cls} {res:.2f}'
-        cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
-        cv.putText(img, label, (x, y + h + 30),
-                   cv.FONT_HERSHEY_COMPLEX, 1, color)
-        confidences = {label: res}
+    if model_name == 'DeePixBiS':
+        grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
+        faces = faceClassifier.detectMultiScale(
+            grey, scaleFactor=1.1, minNeighbors=4)
+        face = find_largest_face(faces)
+
+        if face is not None:
+            x, y, w, h = face
+            faceRegion = img[y:y + h, x:x + w]
+            faceRegion = cv.cvtColor(faceRegion, cv.COLOR_BGR2RGB)
+            faceRegion = tfms(faceRegion)
+            faceRegion = faceRegion.unsqueeze(0)
+            mask, binary = model.forward(faceRegion)
+            res = torch.mean(mask).item()
+            if res < thresh:
+                cls = 'Spoof'
+                color = (0, 0, 255)
+                res = 1 - res
+            else:
+                cls = 'Real'
+                color = (0, 255, 0)
+            label = f'{cls} {res:.2f}'
+            cv.rectangle(img, (x, y), (x + w, y + h), color, 2)
+            cv.putText(img, label, (x, y + h + 30),
+                       cv.FONT_HERSHEY_COMPLEX, 1, color)
+            confidences = {label: res}
     return img, confidences
 
 
 if __name__ == '__main__':
     demo = gr.Interface(
         fn=inference,
-        inputs=[gr.Image(source='webcam', shape=None, type='numpy')],
+        inputs=[gr.Image(source='webcam', shape=None, type='numpy'),
+                Dropdown(["DeePixBiS", "DSDG"], value="DeePixBiS")],
         outputs=["image", gr.Label(num_top_classes=2)],
         examples=examples).queue(concurrency_count=2)
     demo.launch(share=False)
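For reference, the scoring logic inside the new DeePixBiS branch is unchanged: the mean of the predicted pixel-wise mask is compared against thresh (0.45); scores at or above the threshold are labelled 'Real' with confidence res, scores below it 'Spoof' with confidence 1 - res. A standalone sketch of just that branch, using a made-up mask score:

thresh = 0.45
res = 0.30                       # hypothetical mean of the pixel-wise mask
if res < thresh:
    cls = 'Spoof'
    res = 1 - res                # report spoof confidence as 1 - score
else:
    cls = 'Real'
print(f'{cls} {res:.2f}')        # -> Spoof 0.70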
data/Test DELETED
@@ -1 +0,0 @@
-<<Test>>