# import streamlit as st
# from ultralytics import YOLO
# import cv2
# import numpy as np
# from PIL import Image
#
# # Quick standalone webcam check (blocks the script; not part of the Streamlit UI):
# model = YOLO("best.pt")
# model.predict(source=0, imgsz=640, conf=0.6, show=True)
# # Load the YOLO model (replace with your model path)
# model = YOLO("best.pt")  # Use your YOLO model file here
# st.title("Fire Detection in Forest")
#
# # Sidebar for input options
# input_option = st.sidebar.selectbox("Select Input Method", ["Upload Image", "Use Webcam", "Upload Video"])
#
# if input_option == "Upload Image":
#     # Upload Image
#     uploaded_file = st.file_uploader("Choose an Image", type=["jpg", "jpeg", "png"])
#     if uploaded_file is not None:
#         img = Image.open(uploaded_file)
#         st.image(img, caption="User Image")
#         st.write("Classifying...")
#         # Convert image to numpy array
#         img_np = np.array(img)
#         # Make predictions
#         results = model.predict(source=img_np, conf=0.5)
#         # Variable to check if fire is detected
#         fire_detected = False
#         # Draw bounding boxes on the image
#         for result in results:
#             # boxes.data holds one row per detection: x1, y1, x2, y2, confidence, class id
#             boxes = result.boxes.data.cpu().numpy()
#             for box in boxes:
#                 x1, y1, x2, y2 = box[:4].astype(int)
#                 img_np = cv2.rectangle(img_np, (x1, y1), (x2, y2), (0, 255, 0), 2)
#                 # Check if the detected class is "fire" (adjust based on your model's class mapping)
#                 class_id = int(box[5])
#                 if class_id == 0:  # Replace 0 with the actual class ID for fire if different
#                     fire_detected = True
#         # Show the resulting image
#         st.image(img_np, caption="Detected Fire", use_column_width=True)
#         # Display message based on fire detection
#         if fire_detected:
#             st.success("🔥 Fire Detected!")
#         else:
#             st.warning("No Fire Detected.")
# elif input_option == "Use Webcam":
#     st.write("Starting webcam for live detection...")
#     # Start video capture
#     camera = cv2.VideoCapture(0)  # 0 is the default camera
#     # Create a placeholder for the video feed
#     video_placeholder = st.empty()
#     # The stop button must be created once, before the loop (calling st.button
#     # inside the loop raises a duplicate-widget error); pressing it triggers a
#     # rerun, which ends the loop.
#     stop_detection = st.button("Stop Detection")
#     # Main loop for live detection
#     while not stop_detection:
#         ret, frame = camera.read()
#         if not ret:
#             st.write("Failed to capture image")
#             break
#         # Make predictions
#         results = model.predict(source=frame, conf=0.5)
#         # Draw bounding boxes on the frame
#         for result in results:
#             boxes = result.boxes.xyxy.cpu().numpy().astype(int)
#             for box in boxes:
#                 x1, y1, x2, y2 = box[:4]
#                 frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
#         # Convert frame to RGB
#         rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#         # Display the frame in the Streamlit app
#         video_placeholder.image(rgb_frame, channels="RGB", use_column_width=True)
#     # Release the camera
#     camera.release()
# elif input_option == "Upload Video":
#     uploaded_video = st.file_uploader("Choose a video", type=["mp4", "avi", "mov", "mkv"])
#     if uploaded_video is not None:
#         # Save the uploaded video temporarily
#         temp_video_path = "temp_video.mp4"
#         with open(temp_video_path, "wb") as f:
#             f.write(uploaded_video.read())
#         # Display the uploaded video
#         st.video(temp_video_path)
#         # Open the video file
#         video_capture = cv2.VideoCapture(temp_video_path)
#         # Create a placeholder for video frame processing
#         video_frame_placeholder = st.empty()
#         fire_detected = False
#         # Loop through video frames
#         while video_capture.isOpened():
#             ret, frame = video_capture.read()
#             if not ret:
#                 break
#             # Make predictions using the fire detection model
#             results = model.predict(source=frame, conf=0.5)
#             # Draw bounding boxes on the frame if fire is detected
#             for result in results:
#                 boxes = result.boxes.xyxy.cpu().numpy().astype(int)
#                 for box in boxes:
#                     x1, y1, x2, y2 = box[:4]
#                     frame = cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
#                     fire_detected = True  # Set the flag once any bounding box is found
#             # Convert the frame to RGB format
#             rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
#             # Display the processed frame
#             video_frame_placeholder.image(rgb_frame, channels="RGB", use_column_width=True)
#         # Display detection result (after the whole video has been processed)
#         if fire_detected:
#             st.write("Fire detected in the video.")
#         else:
#             st.write("No fire detected in the video.")
#         # Release the video capture
#         video_capture.release()
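
# Active Streamlit app: single-image fire detection. The multi-input version
# above is kept commented out for reference.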
import streamlit as st
from ultralytics import YOLO
import numpy as np
from PIL import Image

# Load the YOLO model (use the path to your 'best.pt' file)
model = YOLO("best.pt")
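# Optional sketch: model.names maps class ids to class names (e.g. {0: "fire"});
# the fire check further down assumes a class whose name contains "fire" exists there.
st.sidebar.write("Model classes:", model.names)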
st.title("Fire Detection in Forest")

# Upload Image
uploaded_file = st.file_uploader("Upload an Image", type=["jpg", "jpeg", "png"])
if uploaded_file is not None:
    # Open the uploaded image (force RGB so PNGs with an alpha channel also work)
    img = Image.open(uploaded_file).convert("RGB")
    st.image(img, caption="Uploaded Image", use_column_width=True)

    # Convert the image to a numpy array
    img_np = np.array(img)

    # Make predictions
    results = model.predict(source=img_np, imgsz=640, conf=0.5)

    # Check whether any detected class name contains "fire"
    # (predict() returns a list of Results; the id-to-name map lives on each result)
    fire_detected = any("fire" in results[0].names[int(cls)] for cls in results[0].boxes.cls)

    # Display results
    if fire_detected:
        st.success("🔥 Fire Detected!")
    else:
        st.warning("No Fire Detected.")
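
    # Optional sketch: Ultralytics results can render their own annotations.
    # results[0].plot() returns a BGR numpy array with boxes and labels drawn,
    # which st.image can display directly when told the channel order is BGR.
    annotated = results[0].plot()
    st.image(annotated, channels="BGR", caption="Detections", use_column_width=True)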