"""Streamlit app: detect objects in satellite images with a custom-trained YOLO model.

Flow: upload an image in the sidebar -> run YOLO inference at the chosen
confidence threshold -> show the original and annotated images side by side,
plus per-detection details (label, confidence, normalized xywh box).
"""

import io

import streamlit as st
from PIL import Image
from ultralytics import YOLO

# --- Page Configuration ---
st.set_page_config(
    page_title="YOLO Object Detection For Satellite Image",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)

# --- Theme-Aware Custom CSS ---
# NOTE(review): the CSS payload is currently empty; the call is kept as a
# placeholder so styles can be injected here later.
st.markdown(""" """, unsafe_allow_html=True)


# --- Model Loading ---
@st.cache_resource
def load_model(model_path):
    """Load the YOLO model from *model_path*.

    Cached with st.cache_resource so the model is loaded once per process,
    not on every Streamlit rerun.

    Args:
        model_path: Filesystem path to the ``.pt`` weights file.

    Returns:
        The loaded YOLO model, or ``None`` on failure (the error is shown
        in the UI via ``st.error``).
    """
    try:
        return YOLO(model_path)
    except Exception as e:
        st.error(f"Error loading model: {e}")
        return None


# Path to the model file inside the 'src' directory
MODEL_PATH = './src/rssi_last.pt'
model = load_model(MODEL_PATH)

# --- Sidebar ---
st.sidebar.header("Configuration")
confidence_threshold = st.sidebar.slider(
    "Confidence Threshold", 0.0, 1.0, 0.4, 0.05
)
st.sidebar.markdown("---")
uploaded_file = st.sidebar.file_uploader(
    "Upload an image...", type=["jpg", "jpeg", "png"]
)
st.sidebar.markdown("---")
st.sidebar.markdown(
    "**About this App**\n\n"
    "This application uses a custom-trained YOLO model to detect objects in images. "
    "Upload an image and see the magic!"
)

# --- Main Page ---
st.title("🖼️ Custom Object Detection with YOLO for Satellite image")

if uploaded_file is not None:
    # Read the uploaded image file into a PIL image.
    image_data = uploaded_file.getvalue()
    original_image = Image.open(io.BytesIO(image_data))

    # Two columns: original on the left, annotated result on the right.
    col1, col2 = st.columns(2)
    with col1:
        st.subheader("Original Image")
        # use_container_width is the recommended replacement for use_column_width.
        st.image(original_image, caption="Your uploaded image.", use_container_width=True)

    if model:
        # Perform inference at the user-selected confidence threshold.
        with st.spinner("Running detection..."):
            results = model(original_image, conf=confidence_threshold)

        # Ultralytics returns one Results object per input image.
        result = results[0]
        # plot() renders the detections onto the image in BGR channel order.
        annotated_image_bgr = result.plot()
        # Reverse the channel axis: BGR -> RGB for Streamlit display.
        annotated_image_rgb = annotated_image_bgr[..., ::-1]

        with col2:
            st.subheader("Detected Objects")
            st.image(annotated_image_rgb, caption="Image with detected objects.", use_container_width=True)

        # Display detection details
        st.subheader("Detection Details")
        if len(result.boxes) > 0:
            with st.expander("Click to see detailed results", expanded=True):
                for i, box in enumerate(result.boxes):
                    # FIX: box.cls is a float tensor, so .item() yields a float;
                    # result.names is keyed by integer class id. Cast explicitly
                    # instead of relying on float/int hash equivalence.
                    label = result.names[int(box.cls[0].item())]
                    conf = box.conf[0].item()
                    xywhn = box.xywhn[0].tolist()  # normalized center-x, center-y, w, h
                    st.markdown(f"**Object {i+1}: `{label}`**")
                    st.write(f"- Confidence: **{conf:.2f}**")
                    st.write("- Bounding Box (Normalized xywh):")
                    st.code(f" x: {xywhn[0]:.4f}, y: {xywhn[1]:.4f}, width: {xywhn[2]:.4f}, height: {xywhn[3]:.4f}")
        else:
            st.info("No objects were detected with the current confidence threshold.")
else:
    st.info("Please upload an image using the sidebar to begin.")