# deepfake/app.py
import gradio as gr
import inference_2 as inference
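# NOTE: inference_2 is assumed to expose four prediction functions, each taking a
# single media input and returning a text result for the Textbox output below:
#   deepfakes_video_predict(video)   - deepfake detection on an uploaded video
#   deepfakes_image_predict(image)   - deepfake detection on an uploaded image
#   deepfakes_spec_predict(audio)    - deepfake/spoofed audio detection
#   detect_ai_generated_image(image) - AI-generated vs. human-created image check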
# Title and Description
title = "Multimodal Deepfake Detector"
description = "Detect deepfakes and AI-generated content from videos, audio, and images using advanced AI models."
# Individual Interfaces
video_interface = gr.Interface(
    fn=inference.deepfakes_video_predict,
    inputs=gr.Video(label="Upload a Video"),
    outputs=gr.Textbox(label="Prediction"),
    examples=["videos/aaa.mp4", "videos/bbb.mp4"],
    cache_examples=False,
)
image_interface = gr.Interface(
    fn=inference.deepfakes_image_predict,
    inputs=gr.Image(label="Upload an Image"),
    outputs=gr.Textbox(label="Prediction"),
    examples=["images/lady.jpg", "images/fake_image.jpg"],
    cache_examples=False,
)
audio_interface = gr.Interface(
    fn=inference.deepfakes_spec_predict,
    inputs=gr.Audio(label="Upload an Audio Clip"),
    outputs=gr.Textbox(label="Prediction"),
    examples=["audios/DF_E_2000027.flac", "audios/DF_E_2000031.flac"],
    cache_examples=False,
)
ai_image_detector = gr.Interface(
    fn=inference.detect_ai_generated_image,
    inputs=gr.Image(label="Upload an Image"),
    outputs=gr.Textbox(label="AI-Generated or Human-Created"),
    examples=["images/ai_generated.jpg", "images/real.jpeg"],
    cache_examples=False,
)
# 🧩 Full UI with Title & Tabs
with gr.Blocks(title=title) as app:
    gr.Markdown(f"# {title}")
    gr.Markdown(description)

    with gr.Tab("🎬 Video Inference"):
        video_interface.render()
    with gr.Tab("🎧 Audio Inference"):
        audio_interface.render()
    with gr.Tab("🖼️ Image Inference"):
        image_interface.render()
    with gr.Tab("🤖 AI Image Detector"):
        ai_image_detector.render()
if __name__ == "__main__":
    # share=True serves the app through a temporary public Gradio link
    app.launch(share=True)