import gradio as gr
import inference_2 as inference
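
# inference_2 is a local module (not shown here) assumed to expose three
# predict functions -- deepfakes_video_predict, deepfakes_image_predict,
# and deepfakes_spec_predict -- each taking a single media file path and
# returning a text verdict (e.g. a real/fake label with a confidence score).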
title = " Multimodal Deepfake Detector"
description = "Detect deepfakes across **Video**, **Audio**, and **Image** modalities."
# Update layout with proportional scaling and spacing
video_interface = gr.Interface(
    inference.deepfakes_video_predict,
    gr.Video(label="Upload Video", scale=1),
    "text",
    description=description,
    examples=["videos/aaa.mp4", "videos/bbb.mp4"],
    cache_examples=False,
)
image_interface = gr.Interface(
    inference.deepfakes_image_predict,
    gr.Image(label="Upload Image", scale=1),
    "text",
    description=description,
    examples=["images/lady.jpeg", "images/fake_image.jpg"],
    cache_examples=False,
)
audio_interface = gr.Interface(
    inference.deepfakes_spec_predict,
    gr.Audio(label="Upload Audio", scale=1),
    "text",
    description=description,
    examples=["audios/DF_E_2000027.flac", "audios/DF_E_2000031.flac"],
    cache_examples=False,
)
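# With cache_examples=False, example outputs are not pre-computed at startup;
# clicking an example only populates the input, and the model runs on submit.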
# Apply CSS for consistent spacing and alignment
css = """
.gradio-container {
display: flex;
flex-direction: column;
align-items: center;
justify-content: flex-start;
padding: 20px;
}
.gradio-container .output {
margin-top: 10px;
width: 100%;
}
.gradio-container .input {
margin-bottom: 20px;
width: 100%;
}
"""
# Group the three interfaces into tabs
app = gr.TabbedInterface(
    interface_list=[video_interface, audio_interface, image_interface],
    tab_names=['Video Inference', 'Audio Inference', 'Image Inference'],
    title=title,
    css=css,
)
# Accessibility: each input component above carries a descriptive label
if __name__ == '__main__':
    app.launch(share=False)
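
# Usage: run this script and open the printed local URL. share=False keeps the
# server local; share=True would create a temporary public gradio.live link.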