ompathak committed
Commit 16ab624
1 Parent(s): e9aa3d1

Upload 7 files

deepfake-image-detection/.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
deepfake-image-detection/.gitignore ADDED
@@ -0,0 +1 @@
+ examples/
deepfake-image-detection/README.md ADDED
@@ -0,0 +1,14 @@
+ ---
+ title: Deepfake Detection
+ emoji: 📚
+ colorFrom: red
+ colorTo: indigo
+ sdk: gradio
+ sdk_version: 3.41.2
+ #3.0.14
+ app_file: app.py
+ pinned: false
+ license: apache-2.0
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
deepfake-image-detection/app.py ADDED
@@ -0,0 +1,116 @@
+ import gradio as gr
+ import torch
+ import torch.nn.functional as F
+ from facenet_pytorch import MTCNN, InceptionResnetV1
+ import os
+ import numpy as np
+ from PIL import Image
+ import zipfile
+ import cv2
+ from pytorch_grad_cam import GradCAM
+ from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+ from pytorch_grad_cam.utils.image import show_cam_on_image
+
+ # Unpack the bundled example images next to the app
+ with zipfile.ZipFile("examples.zip", "r") as zip_ref:
+     zip_ref.extractall(".")
+
+ DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
+
+ # MTCNN face detector; post_process=False keeps pixel values in [0, 255]
+ mtcnn = MTCNN(
+     select_largest=False,
+     post_process=False,
+     device=DEVICE
+ ).to(DEVICE).eval()
+
+ # InceptionResnetV1 backbone with a single-logit head: sigmoid(output) is the probability of "fake"
+ model = InceptionResnetV1(
+     pretrained="vggface2",
+     classify=True,
+     num_classes=1,
+     device=DEVICE
+ )
+
+ checkpoint = torch.load("resnetinceptionv1_epoch_32.pth", map_location=torch.device('cpu'))
+ model.load_state_dict(checkpoint['model_state_dict'])
+ model.to(DEVICE)
+ model.eval()
+
+ # Build the example list; file names are expected to encode the label as "<label>_*"
+ EXAMPLES_FOLDER = 'examples'
+ examples_names = os.listdir(EXAMPLES_FOLDER)
+ examples = []
+ for example_name in examples_names:
+     example_path = os.path.join(EXAMPLES_FOLDER, example_name)
+     label = example_name.split('_')[0]
+     example = {
+         'path': example_path,
+         'label': label
+     }
+     examples.append(example)
+ np.random.shuffle(examples)  # randomize the example order
+
+ def predict(input_image: Image.Image, true_label: str):
+     """Predict whether the face in input_image is real or fake and return a Grad-CAM overlay."""
+     face = mtcnn(input_image)
+     if face is None:
+         raise Exception("No photoreal face detected")
+     face = face.unsqueeze(0)  # add the batch dimension
+     face = F.interpolate(face, size=(256, 256), mode='bilinear', align_corners=False)
+
+     # keep an unnormalized uint8 copy of the face for the overlay
+     prev_face = face.squeeze(0).permute(1, 2, 0).cpu().detach().int().numpy()
+     prev_face = prev_face.astype('uint8')
+
+     face = face.to(DEVICE)
+     face = face.to(torch.float32)
+     face = face / 255.0
+     # show_cam_on_image expects a float image in [0, 1]; casting to int would zero it out
+     face_image_to_plot = face.squeeze(0).permute(1, 2, 0).cpu().detach().numpy()
+
+     # Grad-CAM over the last layer of the final Inception block
+     target_layers = [model.block8.branch1[-1]]
+     cam = GradCAM(model=model, target_layers=target_layers)
+     targets = [ClassifierOutputTarget(0)]
+
+     grayscale_cam = cam(input_tensor=face, targets=targets, eigen_smooth=True)
+     grayscale_cam = grayscale_cam[0, :]
+     visualization = show_cam_on_image(face_image_to_plot, grayscale_cam, use_rgb=True)
+     face_with_mask = cv2.addWeighted(prev_face, 1, visualization, 0.5, 0)
+
+     with torch.no_grad():
+         output = torch.sigmoid(model(face).squeeze(0))
+         prediction = "real" if output.item() < 0.5 else "fake"
+
+         real_prediction = 1 - output.item()
+         fake_prediction = output.item()
+
+         confidences = {
+             'real': real_prediction,
+             'fake': fake_prediction
+         }
+     return confidences, true_label, face_with_mask
+
+ title = "Deepfake Image Detection"
+ description = "AI/ML demo that classifies an uploaded image as real or fake (deepfake) and shows a Grad-CAM explainability overlay."
+ article = "<p style='text-align: center'>...</p>"
+
+ interface = gr.Interface(
+     fn=predict,
+     inputs=[
+         gr.Image(label="Input Image", type="pil"),
+         "text"
+     ],
+     outputs=[
+         gr.Label(label="Prediction Model - % of Fake or Real image detection"),
+         "text",
+         gr.Image(label="Face with Explainability", type="pil")
+     ],
+     theme=gr.themes.Soft(),
+     title=title,
+     description=description,
+     article=article
+     # examples=[[examples[i]["path"], examples[i]["label"]] for i in range(10)]
+ )
+ interface.launch()  # share=True
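For reference, the classification path in app.py can be reproduced outside Gradio. The sketch below is not part of the commit: it mirrors the loading and inference steps above, assumes facenet-pytorch, torch, and the resnetinceptionv1_epoch_32.pth checkpoint are available locally, and uses a hypothetical input file name.

# Standalone inference sketch mirroring app.py (not part of the commit).
# Assumes the checkpoint is in the working directory; "my_image.jpg" is a hypothetical path.
import torch
import torch.nn.functional as F
from PIL import Image
from facenet_pytorch import MTCNN, InceptionResnetV1

device = 'cuda' if torch.cuda.is_available() else 'cpu'
mtcnn = MTCNN(select_largest=False, post_process=False, device=device).eval()
model = InceptionResnetV1(pretrained="vggface2", classify=True, num_classes=1, device=device)
state = torch.load("resnetinceptionv1_epoch_32.pth", map_location="cpu")
model.load_state_dict(state["model_state_dict"])
model.to(device).eval()

face = mtcnn(Image.open("my_image.jpg").convert("RGB"))  # (3, H, W) tensor or None
if face is None:
    raise SystemExit("No face detected")
face = F.interpolate(face.unsqueeze(0), size=(256, 256), mode="bilinear", align_corners=False)
face = face.to(device, dtype=torch.float32) / 255.0  # post_process=False leaves values in [0, 255]

with torch.no_grad():
    p_fake = torch.sigmoid(model(face).squeeze()).item()
print({"real": 1 - p_fake, "fake": p_fake})  # same confidence dict the Space displays

The Grad-CAM overlay is omitted here; it only affects the visual output, not the prediction.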
deepfake-image-detection/examples.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9c719c2f16bad2e71d0d33d6ae59fa646dac82a812e1a90578c4b97ef6e8f36c
+ size 28750945
deepfake-image-detection/requirements.txt ADDED
@@ -0,0 +1,6 @@
+ gradio==3.41.2  # matches sdk_version in README.md; gr.themes requires a 3.2x+ release
+ Pillow
+ facenet-pytorch==2.5.2
+ torch==1.11.0
+ opencv-python
+ grad-cam
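The pinned stack can be sanity-checked with a short import test after installation. This is a sketch, not part of the commit; it only verifies that the packages above import together.

# Import smoke test for the pinned dependencies (run after `pip install -r requirements.txt`).
import cv2
import gradio
import torch
from PIL import Image  # noqa: F401
from facenet_pytorch import InceptionResnetV1  # noqa: F401
from pytorch_grad_cam import GradCAM  # noqa: F401

print("gradio", gradio.__version__, "| torch", torch.__version__, "| opencv", cv2.__version__)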
deepfake-image-detection/resnetinceptionv1_epoch_32.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:794ebe83c6a7d7959c30c175030b4885e2b9fa175f1cc3e582236595d119f52b
+ size 282395989