import gradio as gr

import inference_2 as inference


title = "Multimodal Deepfake Detector"
description = "Detect deepfakes across **Video**, **Audio**, and **Image** modalities."

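# Video tab: runs inference.deepfakes_video_predict on an uploaded clip and returns a text verdict.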
video_interface = gr.Interface(
    inference.deepfakes_video_predict,
    gr.Video(label="Upload Video", scale=1),
    "text",
    description=description,
    examples=["videos/aaa.mp4", "videos/bbb.mp4"],
    cache_examples=False,
)

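# Image tab: runs inference.deepfakes_image_predict on an uploaded image.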
image_interface = gr.Interface(
    inference.deepfakes_image_predict,
    gr.Image(label="Upload Image", scale=1),
    "text",
    description=description,
    examples=["images/lady.jpeg", "images/fake_image.jpg"],
    cache_examples=False,
)

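# Audio tab: runs inference.deepfakes_spec_predict on an uploaded audio file.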
audio_interface = gr.Interface(
    inference.deepfakes_spec_predict,
    gr.Audio(label="Upload Audio", scale=1),
    "text",
    description=description,
    examples=["audios/DF_E_2000027.flac", "audios/DF_E_2000031.flac"],
    cache_examples=False,
)

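# Custom CSS: center the app container and add spacing around the input/output areas.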
css = """
|
|
.gradio-container {
|
|
display: flex;
|
|
flex-direction: column;
|
|
align-items: center;
|
|
justify-content: flex-start;
|
|
padding: 20px;
|
|
}
|
|
.gradio-container .output {
|
|
margin-top: 10px;
|
|
width: 100%;
|
|
}
|
|
.gradio-container .input {
|
|
margin-bottom: 20px;
|
|
width: 100%;
|
|
}
|
|
"""
|
|
|
|
|
|
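# Bundle the three single-modality interfaces into one tabbed app.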
app = gr.TabbedInterface(
    interface_list=[video_interface, audio_interface, image_interface],
    tab_names=['Video Inference', 'Audio Inference', 'Image Inference'],
    title=title,
    css=css,
)

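# Launch locally; pass share=True to launch() for a temporary public Gradio link.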
if __name__ == '__main__':
    app.launch(share=False)