import gradio as gr
import inference_2 as inference
import os
import sys
import asyncio

# On Windows, use the Proactor event loop so asyncio subprocess support works.
if sys.platform == "win32":
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

custom_css = """
|
|
/* ChatGPT-style global container */
|
|
.gradio-container {
|
|
font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Roboto, Oxygen, Ubuntu, Cantarell, sans-serif !important;
|
|
background: #212121 !important;
|
|
color: #ffffff !important;
|
|
margin: 0 !important;
|
|
padding: 0 !important;
|
|
height: 100vh !important;
|
|
}
|
|
|
|
/* ChatGPT-style layout */
|
|
.chat-layout {
|
|
display: flex !important;
|
|
height: 100vh !important;
|
|
}
|
|
|
|
/* ChatGPT-style sidebar */
|
|
.chat-sidebar {
|
|
width: 260px !important;
|
|
background: #171717 !important;
|
|
border-right: 1px solid #2e2e2e !important;
|
|
padding: 1rem !important;
|
|
overflow-y: auto !important;
|
|
flex-shrink: 0 !important;
|
|
}
|
|
|
|
.sidebar-header {
|
|
padding: 1rem 0 !important;
|
|
border-bottom: 1px solid #2e2e2e !important;
|
|
margin-bottom: 1rem !important;
|
|
}
|
|
|
|
.sidebar-title {
|
|
font-size: 1.1rem !important;
|
|
font-weight: 600 !important;
|
|
color: #ffffff !important;
|
|
margin: 0 !important;
|
|
}
|
|
|
|
/* Sidebar menu items */
|
|
.sidebar-item {
|
|
display: flex !important;
|
|
align-items: center !important;
|
|
padding: 0.75rem 1rem !important;
|
|
margin: 0.25rem 0 !important;
|
|
border-radius: 8px !important;
|
|
cursor: pointer !important;
|
|
transition: background-color 0.2s ease !important;
|
|
color: #b4b4b4 !important;
|
|
text-decoration: none !important;
|
|
width: 100% !important;
|
|
border: none !important;
|
|
background: transparent !important;
|
|
text-align: left !important;
|
|
}
|
|
|
|
.sidebar-item:hover {
|
|
background: #2a2a2a !important;
|
|
color: #ffffff !important;
|
|
}
|
|
|
|
.sidebar-item.active {
|
|
background: #2a2a2a !important;
|
|
color: #ffffff !important;
|
|
}
|
|
|
|
/* ChatGPT-style main content */
|
|
.chat-main {
|
|
flex: 1 !important;
|
|
background: #212121 !important;
|
|
overflow-y: auto !important;
|
|
display: flex !important;
|
|
flex-direction: column !important;
|
|
}
|
|
|
|
/* ChatGPT-style header */
|
|
.chat-header {
|
|
background: #2a2a2a !important;
|
|
border-bottom: 1px solid #2e2e2e !important;
|
|
padding: 1rem 2rem !important;
|
|
flex-shrink: 0 !important;
|
|
}
|
|
|
|
.chat-title {
|
|
font-size: 1.2rem !important;
|
|
font-weight: 600 !important;
|
|
color: #ffffff !important;
|
|
margin: 0 !important;
|
|
}
|
|
|
|
.chat-subtitle {
|
|
color: #b4b4b4 !important;
|
|
font-size: 0.9rem !important;
|
|
margin-top: 0.25rem !important;
|
|
}
|
|
|
|
/* ChatGPT-style content area */
|
|
.chat-content {
|
|
flex: 1 !important;
|
|
padding: 2rem !important;
|
|
max-width: 800px !important;
|
|
margin: 0 auto !important;
|
|
width: 100% !important;
|
|
box-sizing: border-box !important;
|
|
}
|
|
|
|
/* ChatGPT-style cards */
|
|
.chat-card {
|
|
background: #2a2a2a !important;
|
|
border: 1px solid #2e2e2e !important;
|
|
border-radius: 12px !important;
|
|
padding: 1.5rem !important;
|
|
margin: 1rem 0 !important;
|
|
transition: border-color 0.2s ease !important;
|
|
}
|
|
|
|
.chat-card:hover {
|
|
border-color: #404040 !important;
|
|
}
|
|
|
|
/* ChatGPT-style inputs */
|
|
.chat-input {
|
|
background: #171717 !important;
|
|
border: 1px solid #2e2e2e !important;
|
|
border-radius: 8px !important;
|
|
padding: 1rem !important;
|
|
color: #ffffff !important;
|
|
font-size: 0.9rem !important;
|
|
transition: border-color 0.2s ease !important;
|
|
}
|
|
|
|
.chat-input:focus {
|
|
border-color: #0ea5e9 !important;
|
|
box-shadow: 0 0 0 3px rgba(14, 165, 233, 0.1) !important;
|
|
outline: none !important;
|
|
}
|
|
|
|
/* ChatGPT-style buttons */
|
|
.chat-button {
|
|
background: #0ea5e9 !important;
|
|
color: #ffffff !important;
|
|
border: none !important;
|
|
border-radius: 8px !important;
|
|
padding: 0.75rem 1.5rem !important;
|
|
font-weight: 500 !important;
|
|
font-size: 0.9rem !important;
|
|
cursor: pointer !important;
|
|
transition: all 0.2s ease !important;
|
|
display: inline-flex !important;
|
|
align-items: center !important;
|
|
gap: 0.5rem !important;
|
|
}
|
|
|
|
.chat-button:hover {
|
|
background: #0284c7 !important;
|
|
transform: translateY(-1px) !important;
|
|
box-shadow: 0 4px 12px rgba(14, 165, 233, 0.3) !important;
|
|
}
|
|
|
|
/* ChatGPT-style output */
|
|
.chat-output {
|
|
background: #171717 !important;
|
|
border: 1px solid #2e2e2e !important;
|
|
border-radius: 8px !important;
|
|
padding: 1rem !important;
|
|
font-family: 'SF Mono', Monaco, 'Cascadia Code', 'Roboto Mono', Consolas, 'Courier New', monospace !important;
|
|
font-size: 0.85rem !important;
|
|
line-height: 1.5 !important;
|
|
color: #ffffff !important;
|
|
min-height: 200px !important;
|
|
white-space: pre-wrap !important;
|
|
}
|
|
|
|
/* Upload area styling */
|
|
.upload-area {
|
|
border: 2px dashed #2e2e2e !important;
|
|
border-radius: 8px !important;
|
|
padding: 2rem !important;
|
|
text-align: center !important;
|
|
background: #171717 !important;
|
|
transition: all 0.2s ease !important;
|
|
color: #b4b4b4 !important;
|
|
}
|
|
|
|
.upload-area:hover {
|
|
border-color: #0ea5e9 !important;
|
|
background: #1a1a1a !important;
|
|
}
|
|
|
|
/* ChatGPT-style accordion */
|
|
.chat-accordion {
|
|
background: #2a2a2a !important;
|
|
border: 1px solid #2e2e2e !important;
|
|
border-radius: 8px !important;
|
|
margin-top: 1rem !important;
|
|
}
|
|
|
|
.chat-accordion summary {
|
|
padding: 1rem !important;
|
|
font-weight: 500 !important;
|
|
cursor: pointer !important;
|
|
background: #2a2a2a !important;
|
|
border-radius: 8px 8px 0 0 !important;
|
|
color: #ffffff !important;
|
|
}
|
|
|
|
.chat-accordion[open] summary {
|
|
border-bottom: 1px solid #2e2e2e !important;
|
|
}
|
|
|
|
/* Responsive design */
|
|
@media (max-width: 768px) {
|
|
.chat-layout {
|
|
flex-direction: column !important;
|
|
}
|
|
|
|
.chat-sidebar {
|
|
width: 100% !important;
|
|
height: auto !important;
|
|
border-right: none !important;
|
|
border-bottom: 1px solid #2e2e2e !important;
|
|
}
|
|
|
|
.chat-content {
|
|
padding: 1rem !important;
|
|
}
|
|
}
|
|
"""
|
|
|
|
|
|
with gr.Blocks(
    theme=gr.themes.Base(
        primary_hue="blue",
        secondary_hue="gray",
        neutral_hue="gray"
    ),
    css=custom_css,
    title="DeepSecure AI"
) as app:

    with gr.Row(elem_classes="chat-layout"):

        # Sidebar: app title, analysis-mode buttons, and model stats.
        with gr.Column(elem_classes="chat-sidebar", scale=0):
            with gr.Column(elem_classes="sidebar-header"):
                gr.HTML('<div class="sidebar-title">🛡️ DeepSecure AI</div>')

            # Tracks which analysis mode is currently selected.
            analysis_type = gr.State("video")

            video_btn_sidebar = gr.Button(
                "🎬 Video Analysis",
                elem_classes="sidebar-item active",
                variant="secondary",
                size="sm"
            )
            audio_btn_sidebar = gr.Button(
                "🎵 Audio Analysis",
                elem_classes="sidebar-item",
                variant="secondary",
                size="sm"
            )
            image_btn_sidebar = gr.Button(
                "🖼️ Image Analysis",
                elem_classes="sidebar-item",
                variant="secondary",
                size="sm"
            )

            with gr.Accordion("📊 Model Stats", open=False, elem_classes="chat-accordion"):
                gr.HTML("""
                <div style="color: #b4b4b4; font-size: 0.8rem; line-height: 1.4;">
                    <strong>Video:</strong> 96.2% accuracy<br>
                    <strong>Audio:</strong> 94.8% accuracy<br>
                    <strong>Image:</strong> 97.1% accuracy
                </div>
                """)
with gr.Column(elem_classes="chat-main", scale=1):
|
|
|
|
|
|
with gr.Row(elem_classes="chat-header"):
|
|
current_title = gr.HTML('<div class="chat-title">Video Deepfake Detection</div>')
|
|
current_subtitle = gr.HTML('<div class="chat-subtitle">Upload a video file to analyze for potential deepfake manipulation</div>')
|
|
|
|
|
|
with gr.Column(elem_classes="chat-content"):
|
|
|
|
|
|
with gr.Group():
|
|
|
|
|
|
video_content = gr.Column(visible=True)
|
|
with video_content:
|
|
with gr.Column(elem_classes="chat-card"):
|
|
gr.Markdown("### Upload Video File")
|
|
gr.Markdown("*Drag and drop or click to browse • Supported: MP4, AVI, MOV, MKV*")
|
|
|
|
video_input = gr.Video(
|
|
label="",
|
|
elem_classes="upload-area",
|
|
height=250
|
|
)
|
|
|
|
video_btn = gr.Button(
|
|
"🔍 Analyze Video",
|
|
elem_classes="chat-button",
|
|
size="lg",
|
|
variant="primary"
|
|
)
|
|
|
|
video_output = gr.Textbox(
|
|
label="Analysis Results",
|
|
elem_classes="chat-output",
|
|
lines=10,
|
|
placeholder="Upload a video and click 'Analyze Video' to see detailed results here...",
|
|
interactive=False
|
|
)
|
|
|
|
|
|
video_examples = []
|
|
if os.path.exists("videos/aaa.mp4"):
|
|
video_examples.append("videos/aaa.mp4")
|
|
if os.path.exists("videos/bbb.mp4"):
|
|
video_examples.append("videos/bbb.mp4")
|
|
|
|
if video_examples:
|
|
with gr.Accordion("📁 Try Sample Videos", open=False, elem_classes="chat-accordion"):
|
|
gr.Examples(
|
|
examples=video_examples,
|
|
inputs=video_input,
|
|
label="Sample videos for testing:"
|
|
)
|
|
|
|
|
|
audio_content = gr.Column(visible=False)
|
|
with audio_content:
|
|
with gr.Column(elem_classes="chat-card"):
|
|
gr.Markdown("### Upload Audio File")
|
|
gr.Markdown("*Drag and drop or click to browse • Supported: WAV, MP3, FLAC, M4A*")
|
|
|
|
audio_input = gr.Audio(
|
|
label="",
|
|
elem_classes="upload-area"
|
|
)
|
|
|
|
audio_btn = gr.Button(
|
|
"🔍 Analyze Audio",
|
|
elem_classes="chat-button",
|
|
size="lg",
|
|
variant="primary"
|
|
)
|
|
|
|
audio_output = gr.Textbox(
|
|
label="Analysis Results",
|
|
elem_classes="chat-output",
|
|
lines=10,
|
|
placeholder="Upload an audio file and click 'Analyze Audio' to see detailed results here...",
|
|
interactive=False
|
|
)
|
|
|
|
|
|
audio_examples = []
|
|
if os.path.exists("audios/DF_E_2000027.flac"):
|
|
audio_examples.append("audios/DF_E_2000027.flac")
|
|
if os.path.exists("audios/DF_E_2000031.flac"):
|
|
audio_examples.append("audios/DF_E_2000031.flac")
|
|
|
|
if audio_examples:
|
|
with gr.Accordion("📁 Try Sample Audio", open=False, elem_classes="chat-accordion"):
|
|
gr.Examples(
|
|
examples=audio_examples,
|
|
inputs=audio_input,
|
|
label="Sample audio files for testing:"
|
|
)
|
|
|
|
|
|
image_content = gr.Column(visible=False)
|
|
with image_content:
|
|
with gr.Column(elem_classes="chat-card"):
|
|
gr.Markdown("### Upload Image File")
|
|
gr.Markdown("*Drag and drop or click to browse • Supported: JPG, PNG, WEBP, BMP*")
|
|
|
|
image_input = gr.Image(
|
|
label="",
|
|
elem_classes="upload-area",
|
|
height=300
|
|
)
|
|
|
|
image_btn = gr.Button(
|
|
"🔍 Analyze Image",
|
|
elem_classes="chat-button",
|
|
size="lg",
|
|
variant="primary"
|
|
)
|
|
|
|
image_output = gr.Textbox(
|
|
label="Analysis Results",
|
|
elem_classes="chat-output",
|
|
lines=10,
|
|
placeholder="Upload an image and click 'Analyze Image' to see detailed results here...",
|
|
interactive=False
|
|
)
|
|
|
|
|
|
image_examples = []
|
|
if os.path.exists("images/lady.jpg"):
|
|
image_examples.append("images/lady.jpg")
|
|
if os.path.exists("images/fake_image.jpg"):
|
|
image_examples.append("images/fake_image.jpg")
|
|
|
|
if image_examples:
|
|
with gr.Accordion("📁 Try Sample Images", open=False, elem_classes="chat-accordion"):
|
|
gr.Examples(
|
|
examples=image_examples,
|
|
inputs=image_input,
|
|
label="Sample images for testing:"
|
|
)
|
|
|
|
|
|
    # Panel-switching callbacks: toggle panel visibility and update the header text.
    def switch_to_video():
        return (
            gr.update(visible=True),
            gr.update(visible=False),
            gr.update(visible=False),
            '<div class="chat-title">Video Deepfake Detection</div>',
            '<div class="chat-subtitle">Upload a video file to analyze for potential deepfake manipulation</div>',
            "video"
        )

    def switch_to_audio():
        return (
            gr.update(visible=False),
            gr.update(visible=True),
            gr.update(visible=False),
            '<div class="chat-title">Audio Deepfake Detection</div>',
            '<div class="chat-subtitle">Upload an audio file to detect voice cloning or synthetic speech</div>',
            "audio"
        )

    def switch_to_image():
        return (
            gr.update(visible=False),
            gr.update(visible=False),
            gr.update(visible=True),
            '<div class="chat-title">Image Deepfake Detection</div>',
            '<div class="chat-subtitle">Upload an image to detect face swaps, GANs, or other manipulations</div>',
            "image"
        )

    video_btn_sidebar.click(
        switch_to_video,
        outputs=[video_content, audio_content, image_content, current_title, current_subtitle, analysis_type]
    )

    audio_btn_sidebar.click(
        switch_to_audio,
        outputs=[video_content, audio_content, image_content, current_title, current_subtitle, analysis_type]
    )

    image_btn_sidebar.click(
        switch_to_image,
        outputs=[video_content, audio_content, image_content, current_title, current_subtitle, analysis_type]
    )
    # Inference wrappers: validate the input, run the model, and format the result.
    def safe_video_predict(video):
        if video is None:
            return "⚠️ Please upload a video file first."
        try:
            result = inference.deepfakes_video_predict(video)
            return f"🎬 VIDEO ANALYSIS COMPLETE\n{'='*50}\n\n✅ {result}\n\n📊 Analysis performed using ResNext-50 + LSTM model\n🎯 Model accuracy: 96.2%\n⏱️ Processing time: Variable based on video length"
        except Exception as e:
            return f"❌ VIDEO ANALYSIS FAILED\n{'='*50}\n\n🔍 Error Details:\n{str(e)}\n\n💡 Troubleshooting:\n• Ensure video format is supported (MP4, AVI, MOV, MKV)\n• Check if file is corrupted\n• Try a smaller file size"

    def safe_audio_predict(audio):
        if audio is None:
            return "⚠️ Please upload an audio file first."
        try:
            result = inference.deepfakes_spec_predict(audio)
            return f"🎵 AUDIO ANALYSIS COMPLETE\n{'='*50}\n\n✅ {result}\n\n📊 Analysis performed using Spectral CNN + Transformer model\n🎯 Model accuracy: 94.8%\n⏱️ Processing time: ~5-15 seconds"
        except Exception as e:
            return f"❌ AUDIO ANALYSIS FAILED\n{'='*50}\n\n🔍 Error Details:\n{str(e)}\n\n💡 Troubleshooting:\n• Ensure audio format is supported (WAV, MP3, FLAC, M4A)\n• Check if file is corrupted\n• Try converting to WAV format"

    def safe_image_predict(image):
        if image is None:
            return "⚠️ Please upload an image file first."
        try:
            result = inference.deepfakes_image_predict(image)
            return f"🖼️ IMAGE ANALYSIS COMPLETE\n{'='*50}\n\n✅ {result}\n\n📊 Analysis performed using EfficientNet-B4 + XceptionNet model\n🎯 Model accuracy: 97.1%\n⏱️ Processing time: ~2-5 seconds"
        except Exception as e:
            return f"❌ IMAGE ANALYSIS FAILED\n{'='*50}\n\n🔍 Error Details:\n{str(e)}\n\n💡 Troubleshooting:\n• Ensure image format is supported (JPG, PNG, WEBP, BMP)\n• Check if file is corrupted\n• Try a different image file"

    video_btn.click(safe_video_predict, video_input, video_output, show_progress=True)
    audio_btn.click(safe_audio_predict, audio_input, audio_output, show_progress=True)
    image_btn.click(safe_image_predict, image_input, image_output, show_progress=True)
if __name__ == "__main__":
    # Try a series of ports in case some are already in use.
    ports_to_try = [7862, 7863, 7864, 7865, 8000, 8001, 8002]

    for port in ports_to_try:
        try:
            print(f"Trying to start server on port {port}...")
            app.launch(
                server_name="127.0.0.1",
                server_port=port,
                share=False,
                inbrowser=True,
                prevent_thread_lock=False,
                show_error=True,
                quiet=False,
                max_threads=40
            )
            break
        except OSError as e:
            if "port" in str(e).lower():
                print(f"Port {port} is busy, trying next port...")
                continue
            else:
                print(f"Error starting server: {e}")
                break
        except Exception as e:
            print(f"Unexpected error: {e}")
            break
    else:
        print("All ports are busy. Please close other applications and try again.")