import gradio as gr

import cv2
from mtcnn.mtcnn import MTCNN
import tensorflow as tf
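# tensorflow_addons is imported only for its side effect: importing it registers
# any TFA custom objects (e.g. metrics) that the saved models may reference, so
# tf.keras.models.load_model below can deserialize them (assumed requirement).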
import tensorflow_addons
import numpy as np

import os
import zipfile


local_zip = "FINAL-EFFICIENTNETV2-B0.zip"
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('FINAL-EFFICIENTNETV2-B0')
zip_ref.close()

local_zip = "FINAL-EFFICIENTNETV2-S.zip"
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('FINAL-EFFICIENTNETV2-S')
zip_ref.close()

local_zip = "deepfakes-test-images.zip"
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('deepfakes-test-images')
zip_ref.close()


# Load the two EfficientNetV2 classifiers extracted above.
model_b0 = tf.keras.models.load_model("FINAL-EFFICIENTNETV2-B0")
model_s = tf.keras.models.load_model("FINAL-EFFICIENTNETV2-S")

# MTCNN face detector used to locate a face before classification.
detector = MTCNN()


def deepfakespredict(select_model, input_img):

    tf.keras.backend.clear_session()

    # Pick the classifier selected in the UI.
    if select_model == "EfficientNetV2-B0":
        model = model_b0
    elif select_model == "EfficientNetV2-S":
        model = model_s
    else:
        model = model_b0  # safety fallback; the Radio input only offers the two names above

    # Class order assumed to match the threshold logic below: index 0 = real, index 1 = fake.
    labels = ["real", "fake"]
    pred = np.array([0.0, 0.0])  # default confidences when no face is detected
    text = ""
    face = detector.detect_faces(input_img)

    if len(face) > 0:
        # Use the first detected face and draw a green bounding box on the input image.
        x, y, width, height = face[0]['box']
        x2, y2 = x + width, y + height

        cv2.rectangle(input_img, (x, y), (x2, y2), (0, 255, 0), 2)

        # Crop the face, swap the colour channels, resize to the 224x224 model
        # input, and scale pixel values to [0, 1].
        face_image = input_img[y:y2, x:x2]
        face_image2 = cv2.cvtColor(face_image, cv2.COLOR_BGR2RGB)
        face_image3 = cv2.resize(face_image2, (224, 224))
        face_image4 = face_image3 / 255

        # Predict on a single-image batch; pred holds [P(real), P(fake)].
        pred = model.predict(np.expand_dims(face_image4, axis=0))[0]

        if pred[1] >= 0.6:
            text = "The image is fake."
        elif pred[0] >= 0.6:
            text = "The image is real."
        else:
            text = "The image might be real or fake."
            
    else:
        text = "Face is not detected in the image."

    return text, input_img, {labels[i]: float(pred[i]) for i in range(2)}


title="EfficientNetV2 Deepfakes Image Detector"
description="This is a demo implementation of EfficientNetV2 Deepfakes Image Detector. To use it, simply upload your image, or click one of the examples to load them."
examples = [
    ['deepfakes-test-images/Fake-1.jpg'],
    ['deepfakes-test-images/Fake-2.jpg'],
    ['deepfakes-test-images/Fake-3.jpg'],
    ['deepfakes-test-images/Fake-4.jpg'],
    ['deepfakes-test-images/Fake-5.jpg'],
    ['deepfakes-test-images/Real-1.jpg'],
    ['deepfakes-test-images/Real-2.jpg'],
    ['deepfakes-test-images/Real-3.jpg'],
    ['deepfakes-test-images/Real-4.jpg'],
    ['deepfakes-test-images/Real-5.jpg'],
]
            
gr.Interface(deepfakespredict,
             inputs=[gr.inputs.Radio(["EfficientNetV2-B0", "EfficientNetV2-S"], label="Select model:"), "image"],
             outputs=["text",
                      gr.outputs.Image(type="pil", label="Detected face"),
                      gr.outputs.Label(num_top_classes=None, type="auto", label="Confidence")],
             title=title,
             description=description,
             examples=examples
             ).launch()
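
# Hypothetical quick check without the web UI (not part of the original app):
# read one of the bundled example images, convert it to RGB as Gradio would,
# and call the prediction function directly.
#
#   img = cv2.cvtColor(cv2.imread("deepfakes-test-images/Real-1.jpg"), cv2.COLOR_BGR2RGB)
#   text, annotated, confidences = deepfakespredict("EfficientNetV2-B0", img)
#   print(text, confidences)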