File size: 2,973 Bytes
1d9d691
 
527799a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1d9d691
 
 
527799a
1d9d691
cb2178b
 
 
 
 
 
527799a
 
 
cb2178b
 
527799a
 
cb2178b
527799a
 
88b4257
527799a
 
 
 
88b4257
527799a
 
88b4257
527799a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
import streamlit as st
import os
import cv2
import numpy as np
from ultralytics import YOLO

# Load the YOLOv8-nano weights once at module import so every call to
# process_video() reuses the same model instance.
# NOTE(review): 'yolov8n.pt' is resolved relative to the working directory,
# and ultralytics will auto-download it if absent — confirm that is intended.
model = YOLO('yolov8n.pt')  # Ensure you have the correct model file

def process_video(video_path):
    """Run YOLO detection on a video and write an annotated copy to disk.

    Every frame is passed through the module-level ``model``. Detections of
    class 0 (person, assuming a COCO-trained model) are boxed; a box that is
    wider than it is tall is labelled "Fall Detected" in red, otherwise
    "Person" in green.

    Args:
        video_path: Path to the input video file.

    Returns:
        Path to ``output.mp4`` in the current working directory, or ``None``
        if the input video could not be opened.
    """
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        # Bail out early instead of silently producing an empty output file.
        st.error(f"Could not open video: {video_path}")
        return None

    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Some containers report 0 FPS; fall back to 30 so the VideoWriter
    # is still constructed with a valid frame rate.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30

    # Create a video writer to save the output
    output_path = os.path.join(os.getcwd(), "output.mp4")
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            results = model(frame)
            for result in results:
                for bbox in result.boxes:
                    x1, y1, x2, y2 = map(int, bbox.xyxy[0])
                    cls = int(bbox.cls)

                    if cls == 0:  # Assuming class 0 is 'person'
                        w = x2 - x1
                        h = y2 - y1

                        # Heuristic: a lying-down person produces a
                        # wider-than-tall bounding box.
                        if h < w:
                            color = (0, 0, 255)  # Red color for fall detected
                            label = "Fall Detected"
                        else:
                            color = (0, 255, 0)  # Green color for normal detection
                            label = "Person"

                        cv2.rectangle(frame, (x1, y1), (x2, y2), color, 2)
                        cv2.putText(frame, label, (x1, y1 - 10),
                                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)

            out.write(frame)
    finally:
        # Always release handles so the output file is flushed even if
        # inference raises mid-video.
        cap.release()
        out.release()

    # Double-check that the file was saved
    if os.path.exists(output_path):
        st.success(f"Video successfully processed and saved to {output_path}")
    else:
        st.error("Failed to save the processed video.")

    return output_path

# Streamlit interface
st.title("Fall Detection App")
st.write("The default video is automatically processed to detect falls.")

# Two-column layout: processed output on the left, source info on the right.
left_column, right_column = st.columns(2)

# Right column for video selection
with right_column:
    # Demo clips are expected in the current working directory; all three
    # entries use the same absolute-path convention for consistency.
    default_videos = {
        "Video 1": os.path.join(os.getcwd(), "fall_test_01.mp4"),
        "Video 2": os.path.join(os.getcwd(), "fall_test_02.mp4"),
        "Video 3": os.path.join(os.getcwd(), "video3.mp4"),
    }

    # Select the first video as the default input
    default_video_path = default_videos["Video 1"]

    # Display the selection to the user
    st.write("Default video selected: Video 1")

# Process the default video automatically, but verify it exists first so we
# never hand a missing file to OpenCV.
if not os.path.exists(default_video_path):
    st.error(f"Default video not found: {default_video_path}")
else:
    output_video = process_video(default_video_path)
    if output_video and os.path.exists(output_video):
        left_column.video(output_video)  # Display video in the left column
        left_column.write("Download the processed video:")
        with open(output_video, "rb") as video_file:
            left_column.download_button("Download", video_file, "output.mp4")
    else:
        st.error("There was an issue processing the video. Please try again.")