thugCodeNinja committed on
Commit 119338f
1 Parent(s): db21c2e

Update app.py

Files changed (1)
  1. app.py +25 -52
app.py CHANGED
@@ -2,8 +2,6 @@ import gradio as gr
 import cv2
 import numpy as np
 import tensorflow as tf
-import tensorflow_addons
-
 from facenet_pytorch import MTCNN
 from PIL import Image
 import moviepy.editor as mp
@@ -13,61 +11,43 @@ import zipfile
 # Load face detector
 mtcnn = MTCNN(margin=14, keep_all=True, factor=0.7, device='cpu')
 
-#Face Detection function, Reference: (Timesler, 2020); Source link: https://www.kaggle.com/timesler/facial-recognition-model-in-pytorch
+# Face Detection function
 class DetectionPipeline:
-    """Pipeline class for detecting faces in the frames of a video file."""
-
     def __init__(self, detector, n_frames=None, batch_size=60, resize=None):
-        """Constructor for DetectionPipeline class.
-        """
         self.detector = detector
         self.n_frames = n_frames
         self.batch_size = batch_size
         self.resize = resize
 
     def __call__(self, filename):
-        """Load frames from an MP4 video and detect faces.
-
-        Arguments:
-            filename {str} -- Path to video.
-        """
-        # Create video reader and find length
         v_cap = cv2.VideoCapture(filename)
         v_len = int(v_cap.get(cv2.CAP_PROP_FRAME_COUNT))
 
-        # Pick 'n_frames' evenly spaced frames to sample
         if self.n_frames is None:
             sample = np.arange(0, v_len)
         else:
             sample = np.linspace(0, v_len - 1, self.n_frames).astype(int)
 
-        # Loop through frames
         faces = []
         frames = []
         for j in range(v_len):
             success = v_cap.grab()
             if j in sample:
-                # Load frame
                 success, frame = v_cap.retrieve()
                 if not success:
                     continue
                 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-                # frame = Image.fromarray(frame)
 
-                # Resize frame to desired size
                 if self.resize is not None:
-                    frame = frame.resize([int(d * self.resize) for d in frame.size])
+                    frame = cv2.resize(frame, (int(frame.shape[1] * self.resize), int(frame.shape[0] * self.resize)))
+
                 frames.append(frame)
 
-                # When batch is full, detect faces and reset frame list
                 if len(frames) % self.batch_size == 0 or j == sample[-1]:
-
-                    boxes, probs = self.detector.detect(frames)
-
+                    boxes, _ = self.detector.detect(frames)
                     for i in range(len(frames)):
-
                         if boxes[i] is None:
-                            faces.append(face2) #append previous face frame if no face is detected
+                            faces.append(face2)
                             continue
 
                         box = boxes[i][0].astype(int)
@@ -75,11 +55,10 @@ class DetectionPipeline:
                         face = frame[box[1]:box[3], box[0]:box[2]]
 
                         if not face.any():
-                            faces.append(face2) #append previous face frame if no face is detected
+                            faces.append(face2)
                             continue
 
                         face2 = cv2.resize(face, (224, 224))
-
                         faces.append(face2)
 
                     frames = []
@@ -88,14 +67,11 @@ class DetectionPipeline:
 
         return faces
 
-
-detection_pipeline = DetectionPipeline(detector=mtcnn,n_frames=20, batch_size=60)
+detection_pipeline = DetectionPipeline(detector=mtcnn, n_frames=20, batch_size=60)
 
 model = tf.keras.models.load_model("p1")
 
-
 def deepfakespredict(input_video):
-
     faces = detection_pipeline(input_video)
 
     total = 0
@@ -103,22 +79,21 @@ def deepfakespredict(input_video):
     fake = 0
 
     for face in faces:
-
-        face2 = face/255
+        face2 = face / 255
         pred = model.predict(np.expand_dims(face2, axis=0))[0]
-        total+=1
+        total += 1
 
         pred2 = pred[1]
 
         if pred2 > 0.5:
-            fake+=1
+            fake += 1
         else:
-            real+=1
+            real += 1
 
-    fake_ratio = fake/total
+    fake_ratio = fake / total
 
-    text =""
-    text2 = "Deepfakes Confidence: " + str(fake_ratio*100) + "%"
+    text = ""
+    text2 = "Deepfakes Confidence: " + str(fake_ratio * 100) + "%"
 
     if fake_ratio >= 0.5:
         text = "The video is FAKE."
@@ -126,27 +101,25 @@ def deepfakespredict(input_video):
         text = "The video is REAL."
 
     face_frames = []
-
+
     for face in faces:
         face_frame = Image.fromarray(face.astype('uint8'), 'RGB')
         face_frames.append(face_frame)
-
-    face_frames[0].save('results.gif', save_all=True, append_images=face_frames[1:], duration = 250, loop = 100 )
+
+    face_frames[0].save('results.gif', save_all=True, append_images=face_frames[1:], duration=250, loop=100)
     clip = mp.VideoFileClip("results.gif")
     clip.write_videofile("video.mp4")
 
     return text, text2, "video.mp4"
 
-
-
-title="Group 2- EfficientNetV2 based Deepfake Video Detector"
-description='''Please upload videos responsibly and await the results in a gif. The approach in place includes breaking down the video into several frames followed by collecting
+title = "Group 2- EfficientNetV2 based Deepfake Video Detector"
+description = '''Please upload videos responsibly and await the results in a gif. The approach in place includes breaking down the video into several frames followed by collecting
 the frames that contain a face. Once these frames are collected the trained model attempts to predict if the face is fake or real and contribute to a deepfake confidence. This confidence level eventually
 determines if the video can be considered a fake or not.'''
-
+
 gr.Interface(deepfakespredict,
-             inputs = ["video"],
-             outputs=["text","text", gr.Video(label="Detected face sequence")],
-             title=title,
-             description=description
-             ).launch()
+             inputs=["video"],
+             outputs=["text", "text", gr.Video(label="Detected face sequence")],
+             title=title,
+             description=description
+             ).launch()
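For context on the sampling step the diff leaves unchanged: DetectionPipeline.__call__ picks n_frames evenly spaced frame indices with np.linspace. A minimal standalone sketch, with an illustrative frame count:

import numpy as np

v_len = 300    # illustrative frame count for a short clip
n_frames = 20  # default used when the pipeline is instantiated in this commit

# Evenly spaced indices over the whole video, as in DetectionPipeline.__call__
sample = np.linspace(0, v_len - 1, n_frames).astype(int)
print(sample)  # [  0  15  31  47  62 ... 299]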
 
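The resize change is a bug fix, not just style: after cv2.cvtColor the frame is a NumPy array, whose .resize reshapes in place and whose .size is a single element count, so the old PIL-style frame.resize([int(d * self.resize) for d in frame.size]) could not work on an array. cv2.resize expects the target size as (width, height), while the array shape is (height, width, channels). A standalone sketch with an illustrative frame:

import cv2
import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)  # illustrative 720p RGB frame
scale = 0.5

# cv2.resize takes dsize as (width, height); the NumPy shape is (height, width, channels)
small = cv2.resize(frame, (int(frame.shape[1] * scale), int(frame.shape[0] * scale)))
print(small.shape)  # (360, 640, 3)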
 
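As for how deepfakespredict turns per-face predictions into the reported confidence: each 224x224 crop is scaled to [0, 1] and scored, and the share of faces whose score at index 1 exceeds 0.5 becomes the percentage. A minimal sketch of that aggregation, assuming (the commit does not state it) that the saved model "p1" emits a two-element vector per face with the fake score at index 1:

import numpy as np

def fake_confidence(preds):
    # preds: one two-element score vector per detected face (assumed [real, fake])
    fake = sum(1 for p in preds if p[1] > 0.5)
    return fake / len(preds)

# Illustrative scores for three faces: two flagged fake, one real
preds = [np.array([0.2, 0.8]), np.array([0.7, 0.3]), np.array([0.1, 0.9])]
print("Deepfakes Confidence: " + str(fake_confidence(preds) * 100) + "%")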