import gradio as gr
import torch
import torch.nn.functional as F
from facenet_pytorch import MTCNN, InceptionResnetV1
import os
import numpy as np
from PIL import Image
import zipfile
import cv2
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
from pytorch_grad_cam.utils.image import show_cam_on_image

#ai-pict-detect
from transformers import pipeline

#from typing import Iterable
#from gradio.themes.base import Base
#from gradio.themes.utils import colors, fonts, sizes
#import time

'''
class Seafoam(Base):
    def __init__(
        self,
        *,
        primary_hue: colors.Color | str = colors.emerald,
        secondary_hue: colors.Color | str = colors.blue,
        neutral_hue: colors.Color | str = colors.blue,
        spacing_size: sizes.Size | str = sizes.spacing_md,
        radius_size: sizes.Size | str = sizes.radius_md,
        text_size: sizes.Size | str = sizes.text_lg,
        font: fonts.Font
        | str
        | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("Quicksand"),
            "ui-sans-serif",
            "sans-serif",
        ),
        font_mono: fonts.Font
        | str
        | Iterable[fonts.Font | str] = (
            fonts.GoogleFont("IBM Plex Mono"),
            "ui-monospace",
            "monospace",
        ),
    ):
        super().__init__(
            primary_hue=primary_hue,
            secondary_hue=secondary_hue,
            neutral_hue=neutral_hue,
            spacing_size=spacing_size,
            radius_size=radius_size,
            text_size=text_size,
            font=font,
            font_mono=font_mono,
        )
        super().set(
            body_background_fill="repeating-linear-gradient(45deg, *primary_200, *primary_200 10px, *primary_50 10px, *primary_50 20px)",
            body_background_fill_dark="repeating-linear-gradient(45deg, *primary_800, *primary_800 10px, *primary_900 10px, *primary_900 20px)",
            button_primary_background_fill="linear-gradient(90deg, *primary_300, *secondary_400)",
            button_primary_background_fill_hover="linear-gradient(90deg, *primary_200, *secondary_300)",
            button_primary_text_color="white",
            button_primary_background_fill_dark="linear-gradient(90deg, *primary_600, *secondary_800)",
            slider_color="*secondary_300",
            slider_color_dark="*secondary_600",
            block_title_text_weight="600",
            block_border_width="3px",
            block_shadow="*shadow_drop_lg",
            button_shadow="*shadow_drop_lg",
            button_large_padding="32px",
        )

my_theme = Seafoam()
'''

#my_theme = gr.Theme.from_hub("gradio/seafoam")

my_theme = gr.themes.Monochrome()
#my_theme = gr.themes.Glass()
#my_theme = gr.themes.Default(primary_hue="red", secondary_hue="pink")



pipe = pipeline("image-classification", "nightfury/AI-picture-detector")
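# pipe(image) returns a list of {'label': ..., 'score': ...} dicts (the labels
# here are expected to be 'artificial' and 'human'); image_classifier folds
# them into the {label: score} mapping that gr.Label expects.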

def image_classifier(image):
    outputs = pipe(image)
    results = {}
    for result in outputs:
        results[result['label']] = result['score']
    return results
#ai-pict-detect


with zipfile.ZipFile("examples.zip","r") as zip_ref:
    zip_ref.extractall(".")

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# alternatively: DEVICE = 'cuda:0' to pin a specific GPU
mtcnn = MTCNN(
    select_largest=False,
    post_process=False,
    device=DEVICE
).to(DEVICE).eval()
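# select_largest=False returns the face with the highest detection probability
# rather than the largest bounding box; post_process=False keeps raw 0-255
# pixel values (predict() rescales by 255 manually).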

model = InceptionResnetV1(
    pretrained="vggface2",
    classify=True,
    num_classes=1,
    device=DEVICE
)

checkpoint = torch.load("resnetinceptionv1_epoch_32.pth", map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['model_state_dict'])
model.to(DEVICE)
model.eval()
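# eval() puts the network in inference mode (fixed batch-norm statistics,
# dropout disabled) for the predictions made in predict() below.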

EXAMPLES_FOLDER = 'examples'
examples_names = os.listdir(EXAMPLES_FOLDER)
examples = []
for example_name in examples_names:
    example_path = os.path.join(EXAMPLES_FOLDER, example_name)
    label = example_name.split('_')[0]
    example = {
        'path': example_path,
        'label': label
    }
    examples.append(example)
np.random.shuffle(examples)  # randomize the order examples appear in

def predict(input_image:Image.Image, true_label:str):
    """Predict the label of the input_image"""
    face = mtcnn(input_image)
    if face is None:
        raise Exception("No Photoreal face detected")
    face = face.unsqueeze(0) # add the batch dimension
    face = F.interpolate(face, size=(256, 256), mode='bilinear', align_corners=False)
    
    # convert the face into a numpy array to be able to plot it
    prev_face = face.squeeze(0).permute(1, 2, 0).cpu().detach().int().numpy()
    prev_face = prev_face.astype('uint8')

    face = face.to(DEVICE)
    face = face.to(torch.float32)
    face = face / 255.0
    # keep float pixel values in [0, 1]: show_cam_on_image expects a float image in that range
    face_image_to_plot = face.squeeze(0).permute(1, 2, 0).cpu().detach().numpy()

    target_layers = [model.block8.branch1[-1]]
    # newer pytorch_grad_cam releases dropped the `use_cuda` argument; the CAM
    # runs on whatever device the model and input tensor already live on
    cam = GradCAM(model=model, target_layers=target_layers)
    targets = [ClassifierOutputTarget(0)]

    grayscale_cam = cam(input_tensor=face, targets=targets, eigen_smooth=True)
    grayscale_cam = grayscale_cam[0, :]
    visualization = show_cam_on_image(face_image_to_plot, grayscale_cam, use_rgb=True)
    face_with_mask = cv2.addWeighted(prev_face, 1, visualization, 0.5, 0)

    with torch.no_grad():
        output = torch.sigmoid(model(face).squeeze(0))
        prediction = "real" if output.item() < 0.5 else "fake"
        
        real_prediction = 1 - output.item()
        fake_prediction = output.item()
        
        confidences = {
            'real': real_prediction,
            'fake': fake_prediction
        }
    return confidences, true_label, face_with_mask
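
# Usage sketch outside Gradio (assumes a hypothetical local file 'my_face.jpg';
# the interface below normally supplies these inputs):
#   img = Image.open('my_face.jpg').convert('RGB')
#   confidences, label, overlay = predict(img, 'real')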

title1 = "Deepfake Image Detection"
description1 = "~ AI - ML implementation for fake and real image detection..."
article1 = "<p style='text-align: center'>...</p>"

#interface1 = gr.Interface(fn=predict, inputs=gr.Image(type="pil"), outputs="label", theme = my_theme, title=title1, description=description1, article = article1)


interface1 = gr.Interface(
    fn=predict,
    inputs=[
        gr.Image(label="Input Image", type="pil"),
        "text"
    ],
    outputs=[
        gr.Label(label="Prediction Model - % of Fake or Real image detection"),
        "text",
        gr.Image(label="Face with Explainability", type="pil")
    ],
    theme=my_theme,  # gr.themes.Soft(),
    title=title1,
    description=description1,
    article=article1
    #examples=[[examples[i]["path"], examples[i]["label"]] for i in range(10)]
)


title2 = "AI Generated Image Detection"
description2 = "~ AI - ML implementation for AI-generated image detection (for pictures from older models such as VQGAN+CLIP, see the note below)."
article2 = """
NOTE:
- To detect pictures generated using older models such as VQGAN+CLIP, please use the updated version of this detector instead.
- This detector uses a ViT model to predict whether an artistic image was generated using AI.
- The training dataset did not include any samples generated by Midjourney 5, SDXL, or DALL-E 3, but it was trained on outputs of their predecessors.
- The scope of this tool is 'artistic images'; it is not a deepfake photo detector, and general computer imagery (webcams, screenshots, etc.) may throw it off.
- This tool serves only as a potential indicator that an image was AI-generated.
- Images scoring as very probably artificial (e.g. 90% or higher) could be referred to a human expert for further investigation, if needed.
"""

interface2 = gr.Interface(fn=image_classifier, inputs=gr.Image(type="pil"), outputs="label", theme=my_theme, title=title2, description=description2, article=article2)

#demo.launch(show_api=False)

'''
interface2 = gr.Interface(
    fn=image_classifier,
    inputs=[
        gr.inputs.Image(label="Input Image", type="pil"),
        "text"
    ],
    outputs=[
        gr.outputs.Label(label="Is it Artificial or Human"),
        "text",
        #ValueError: Invalid value for parameter `type`: auto. Please choose from one of: ['numpy', 'pil', 'filepath']
    ],
    
    theme = gr.themes.Soft(),
    title = title1,
    description = description1,
    article = article1
)
'''
    
gr.TabbedInterface(
    [interface1, interface2], ["Deepfake Image Detection", "AI Image Detection"]
).launch()  # share=True