Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -1,22 +1,24 @@
-import gradio as gr
-import cv2
-import os
-import numpy as np
-from datetime import datetime
-import matplotlib.pyplot as plt
+import gradio as gr # Import Gradio for building the interactive UI
+import cv2 # Import OpenCV for video processing and annotation
+import os # Import os for file handling
+import numpy as np # Import NumPy for array operations
+from datetime import datetime # Import datetime for timestamp generation
+import matplotlib.pyplot as plt # Import Matplotlib for plotting trends
+
+# Import custom modules for fault detection, model loading, and settings
 from services.detection_service import detect_faults_solar, detect_faults_windmill
 from services.anomaly_service import track_faults, predict_fault
 from models.solar_model import load_solar_model
 from models.windmill_model import load_windmill_model
 from config.settings import VIDEO_FOLDER
 
-# Initialize global state
-logs = []
-fault_counts = []
-frame_numbers = []
-total_detected = 0
+# Initialize global state to track faults across frames
+logs = [] # List to store log entries
+fault_counts = [] # List to store fault counts per frame
+frame_numbers = [] # List to store frame numbers
+total_detected = 0 # Counter for total faults detected
 
-# Custom CSS to
+# Custom CSS to style the dashboard, mimicking the screenshot's blue borders and layout
 css = """
 <style>
 .main-header {
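Note: the imports above reference local packages (services/, models/, config/) that are not part of this commit. For context, a minimal sketch of what config/settings.py has to expose for the video dropdown further down to work; the folder name here is only an assumption:

# config/settings.py (hypothetical sketch; app.py only imports VIDEO_FOLDER from it)
import os

# Directory that app.py scans for .mp4 files; "videos" is an assumed name, not taken from the repo
VIDEO_FOLDER = os.path.join(os.path.dirname(__file__), "..", "videos")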
@@ -58,41 +60,42 @@ css = """
 </style>
 """
 
+# Function to process video frames and detect faults
 def process_video(video_path, detection_type):
     global logs, fault_counts, frame_numbers, total_detected
-    cap = cv2.VideoCapture(video_path)
+    cap = cv2.VideoCapture(video_path) # Open the video file
     if not cap.isOpened():
         return "Error: Could not open video file.", None, None, None, None, None
 
-    model = load_solar_model() if detection_type == "Solar Panel" else load_windmill_model()
+    model = load_solar_model() if detection_type == "Solar Panel" else load_windmill_model() # Load appropriate model
     frame_count = 0
 
-    # Clear previous state for a new video
+    # Clear previous state for a new video session
    logs.clear()
     fault_counts.clear()
     frame_numbers.clear()
     total_detected = 0
 
     while cap.isOpened():
-        ret, frame = cap.read()
+        ret, frame = cap.read() # Read each frame
         if not ret:
             break
 
         frame_count += 1
-        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # Convert to RGB for display
 
-        # Detect faults
+        # Detect faults using the appropriate model
         faults = detect_faults_solar(model, frame_rgb) if detection_type == "Solar Panel" else detect_faults_windmill(model, frame_rgb)
         num_faults = len(faults)
 
-        # Draw bounding boxes and labels
+        # Draw bounding boxes and labels for detected faults
         for fault in faults:
             x, y = int(fault['location'][0]), int(fault['location'][1])
-            cv2.rectangle(frame_rgb, (x-30, y-30), (x+30, y+30), (255, 0, 0), 2)
+            cv2.rectangle(frame_rgb, (x-30, y-30), (x+30, y+30), (255, 0, 0), 2) # Draw blue box
             cv2.putText(frame_rgb, f"{fault['type']}", (x, y-40),
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2)
+                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 0, 0), 2) # Add fault type label
 
-        # Update state
+        # Update state with current frame data
         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
         log_entry = f"{timestamp} - Frame {frame_count} - Faults: {num_faults}"
         logs.append(log_entry)
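Note: the drawing loop above only assumes that each detector returns a list of dicts with a 'location' (x, y) pair and a 'type' string; the real detect_faults_solar / detect_faults_windmill live in services/detection_service.py, which this commit does not touch. A purely illustrative stand-in with the same return shape, handy for exercising the UI without the models:

import random

def detect_faults_stub(model, frame_rgb):
    # Hypothetical stand-in matching the interface app.py expects:
    # a list of {'location': (x, y), 'type': str} dicts.
    h, w = frame_rgb.shape[:2]
    faults = []
    for _ in range(random.randint(0, 3)):  # pretend 0-3 faults per frame
        faults.append({
            "location": (random.randint(0, w - 1), random.randint(0, h - 1)),
            "type": random.choice(["hotspot", "crack"]),
        })
    return faults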
@@ -100,19 +103,19 @@ def process_video(video_path, detection_type):
         fault_counts.append(num_faults)
         frame_numbers.append(frame_count)
 
-        # Limit to last 100 frames
+        # Limit data to last 100 frames for performance
         if len(frame_numbers) > 100:
             frame_numbers.pop(0)
             fault_counts.pop(0)
 
-    # Prepare outputs
+    # Prepare outputs for Gradio UI
     video_output = frame_rgb
     metrics = f"faults: {num_faults}<br>total_detected: {total_detected}"
-    live_logs = "<br>".join(logs[-20:]) #
+    live_logs = "<br>".join(logs[-20:]) # Display last 20 logs
     last_5_events = "<br>".join(logs[-5:]) if logs else "No events yet"
     prediction = "Potential fault escalation detected!" if predict_fault(fault_counts) else ""
 
-    # Generate trends graph
+    # Generate fault trends graph
     fig, ax = plt.subplots(figsize=(6, 3))
     ax.plot(frame_numbers, fault_counts, marker='o', color='blue')
     ax.set_title("Faults Over Time", fontsize=10)
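Note: predict_fault comes from services/anomaly_service.py, which is outside this diff; the call site only requires a function that takes the rolling fault_counts list and returns something truthy when escalation looks likely. One way such a check could work, shown here as an assumption for illustration rather than the Space's actual logic, is to compare the recent average fault count against the longer-run average:

def predict_fault_example(fault_counts, window=10, factor=1.5):
    # Illustrative escalation check, not the real anomaly_service implementation:
    # flag when the mean of the last `window` frames clearly exceeds the overall mean.
    if len(fault_counts) < 2 * window:
        return False  # not enough history to compare against
    recent = sum(fault_counts[-window:]) / window
    overall = sum(fault_counts) / len(fault_counts)
    return recent > 0 and recent > factor * overall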
@@ -124,55 +127,55 @@ def process_video(video_path, detection_type):
 
     return video_output, metrics, live_logs, last_5_events, fig, prediction
 
-# Gradio interface
+# Create Gradio Blocks interface with custom CSS
 with gr.Blocks(css=css) as demo:
-    gr.Markdown(
-    gr.Markdown(
+    gr.Markdown("### THERMAL FAULT DETECTION DASHBOARD") # Main header
+    gr.Markdown("#### 🟢 RUNNING") # Status indicator
 
     with gr.Row():
         with gr.Column(scale=3):
             with gr.Column():
-                gr.Markdown(
+                gr.Markdown("**LIVE VIDEO FEED**") # Section title
                 gr.Markdown('<div class="section-box">', unsafe_allow_html=True)
-                video_output = gr.Image(label="", interactive=False)
+                video_output = gr.Image(label="", interactive=False) # Display video feed
                 gr.Markdown('</div>', unsafe_allow_html=True)
         with gr.Column(scale=1):
             with gr.Column():
-                gr.Markdown(
+                gr.Markdown("**LIVE METRICS**") # Section title
                 gr.Markdown('<div class="section-box">', unsafe_allow_html=True)
-                metrics_output = gr.Markdown(label=""
-                prediction_output = gr.Markdown(label="")
+                metrics_output = gr.Markdown(label="") # Display metrics
+                prediction_output = gr.Markdown(label="") # Display prediction warning
                 gr.Markdown('</div>', unsafe_allow_html=True)
 
     with gr.Row():
         with gr.Column(scale=1):
             with gr.Column():
-                gr.Markdown(
+                gr.Markdown("**LIVE LOGS**") # Section title
                 gr.Markdown('<div class="section-box">', unsafe_allow_html=True)
-                logs_output = gr.Markdown(label=""
+                logs_output = gr.Markdown(label="") # Display live logs
                 gr.Markdown('</div>', unsafe_allow_html=True)
         with gr.Column():
-            gr.Markdown(
+            gr.Markdown("**LAST 5 CAPTURED EVENTS**") # Section title
             gr.Markdown('<div class="section-box">', unsafe_allow_html=True)
-            events_output = gr.Markdown(label=""
+            events_output = gr.Markdown(label="") # Display last 5 events
             gr.Markdown('</div>', unsafe_allow_html=True)
         with gr.Column(scale=2):
             with gr.Column():
-                gr.Markdown(
+                gr.Markdown("**DETECTION TRENDS**") # Section title
                 gr.Markdown('<div class="section-box">', unsafe_allow_html=True)
-                gr.Markdown(
-                trends_output = gr.Plot(label="")
+                gr.Markdown("**Faults Over Time**") # Sub-title
+                trends_output = gr.Plot(label="") # Display fault trends graph
                 gr.Markdown('</div>', unsafe_allow_html=True)
 
-    # Sidebar for
+    # Sidebar for user inputs
     with gr.Row():
         with gr.Column():
-            video_files = [f for f in os.listdir(VIDEO_FOLDER) if f.endswith('.mp4')]
-            video_input = gr.Dropdown(choices=video_files, label="Select Video")
-            detection_type = gr.Dropdown(choices=["Solar Panel", "Windmill"], label="Detection Type")
-            submit_btn = gr.Button("Start Processing")
+            video_files = [f for f in os.listdir(VIDEO_FOLDER) if f.endswith('.mp4')] # Get video files
+            video_input = gr.Dropdown(choices=video_files, label="Select Video") # Video selection
+            detection_type = gr.Dropdown(choices=["Solar Panel", "Windmill"], label="Detection Type") # Detection type
+            submit_btn = gr.Button("Start Processing") # Trigger button
 
-    # Connect inputs to outputs
+    # Connect inputs to outputs with event trigger
     submit_btn.click(
         fn=process_video,
         inputs=[video_input, detection_type],
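Note: process_video returns six values (annotated frame, metrics text, live logs, last five events, the trends figure, and the prediction string), but the outputs= argument of submit_btn.click falls just outside this hunk, so it is not shown. Assuming the return order maps one-to-one onto the components defined above, the wiring would look roughly like this (a sketch, not a line taken from the file):

submit_btn.click(
    fn=process_video,
    inputs=[video_input, detection_type],
    outputs=[video_output, metrics_output, logs_output,
             events_output, trends_output, prediction_output],
)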
@@ -180,4 +183,5 @@ with gr.Blocks(css=css) as demo:
         _js="() => [document.querySelector('input[type=\"file\"]').value, document.querySelector('select[name=\"detection_type\"]').value]"
     )
 
+# Launch the Gradio app
 demo.launch()
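Note: two Gradio API details are worth flagging, given the Space's "Runtime error" status. gr.Markdown has no unsafe_allow_html parameter (that keyword belongs to Streamlit's st.markdown), so depending on the Gradio version the '<div class="section-box">' calls above are either rejected or ignored with a warning; and _js is the Gradio 3.x spelling of the keyword that Gradio 4.x renames to js. A boxed dashboard section can be styled without raw HTML by attaching the existing CSS class through elem_classes, roughly like this (a sketch, assuming a recent Gradio release; the CSS rule is an assumed stand-in for the one in the css string above):

import gradio as gr

# Assumed stand-in for the "section-box" rule defined in app.py's css string
css_sketch = ".section-box { border: 2px solid #1e90ff; border-radius: 6px; padding: 8px; }"

with gr.Blocks(css=css_sketch) as demo_sketch:
    with gr.Column(elem_classes=["section-box"]):  # styled via elem_classes, no raw <div> needed
        gr.Markdown("**LIVE METRICS**")
        metrics_output = gr.Markdown()
        prediction_output = gr.Markdown()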