freak360 committed
Commit 41b1a4a
Parent: 38f5c90

Upload 4 files

Files changed (4)
  1. Drone Detection.mp4 +0 -0
  2. best.pt +3 -0
  3. requirements.txt +9 -0
  4. test.py +94 -0
Drone Detection.mp4 ADDED
Binary file (366 kB).
 
best.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b2e68e96e61feb870a2a776c0e8be952e293e3fe214a379add20d9f59ccb6adc
+ size 136719657
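
Note: best.pt is stored with Git LFS, so the three lines above are only a pointer. A clone made without git-lfs installed leaves that pointer on disk in place of the ~137 MB checkpoint, and YOLO("best.pt") then fails to load. A minimal guard, sketched in Python (the expected size comes from the pointer above; the error message is illustrative):

    import os

    EXPECTED_SIZE = 136719657  # size declared in the LFS pointer above

    # A pointer file is only a few bytes; the real checkpoint is ~137 MB
    if os.path.getsize("best.pt") < EXPECTED_SIZE:
        raise RuntimeError("best.pt is still a Git LFS pointer; "
                           "install git-lfs and run 'git lfs pull'.")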
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ Pillow
+ opencv-python
+ streamlit
+ torch
+ matplotlib
+ numpy
+ requests
+ gradio
+ ultralytics  # imported by test.py
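
Because pip package names do not always match import names (Pillow imports as PIL, opencv-python as cv2), a quick pre-flight check can confirm the environment matches this list. A sketch, where the DEPS mapping is an illustrative helper rather than part of the repository:

    import importlib.util

    # pip distribution name -> module it is imported as
    DEPS = {"Pillow": "PIL", "opencv-python": "cv2", "streamlit": "streamlit",
            "torch": "torch", "matplotlib": "matplotlib", "numpy": "numpy",
            "requests": "requests", "gradio": "gradio", "ultralytics": "ultralytics"}

    missing = [pkg for pkg, mod in DEPS.items()
               if importlib.util.find_spec(mod) is None]
    if missing:
        raise SystemExit("Missing: " + ", ".join(missing) +
                         " -- run: pip install -r requirements.txt")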
test.py ADDED
@@ -0,0 +1,94 @@
+ import gradio as gr
+ from ultralytics import YOLO
+ import cv2
+ import os
+ import numpy as np
+ import sys
+ import asyncio
+
+ if sys.platform == 'win32':
+     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
+
+ os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"  # avoid duplicate-OpenMP-runtime abort
+
+
+ # Load the trained model (best.pt is uploaded alongside this script)
+ model = YOLO("best.pt")
+
+ def predict_image(image):
+     # Run single-image detection (gr.Image supplies RGB; Ultralytics expects BGR)
+     detections = model.predict(source=cv2.cvtColor(image, cv2.COLOR_RGB2BGR), conf=0.3, iou=0.5, show=False)
+
+     # Render the detected image; plot() returns BGR, so convert back to RGB for display
+     detected_image = cv2.cvtColor(detections[0].plot(), cv2.COLOR_BGR2RGB)
+
+     # Check if any drones were detected
+     num_drones = len(detections[0])  # number of detected boxes
+     message = "Drone detected!" if num_drones > 0 else "No drones detected."
+
+     return detected_image, message  # Return the detected image and the message
+
+
+
+ def predict_video(video_path):
+     # Load the video
+     cap = cv2.VideoCapture(video_path)
+     if not cap.isOpened():
+         return None, "Error: Could not open video."
+
+     drone_detected = False
+     # Prepare to write the output video
+     frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+     frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+     out_fps = cap.get(cv2.CAP_PROP_FPS)
+     output_path = 'output_video.mp4'
+     out = cv2.VideoWriter(output_path, cv2.VideoWriter_fourcc(*'mp4v'), out_fps, (frame_width, frame_height))
+
+     # Process each frame
+     while cap.isOpened():
+         ret, frame = cap.read()
+         if not ret:
+             break
+
+         # Track detections across frames; persist=True keeps tracker state between calls
+         results = model.track(frame, imgsz=640, conf=0.3, iou=0.5, persist=True)
+
+         # Check if any drones were detected in this frame
+         if len(results[0]) > 0:
+             drone_detected = True
+
+         # Draw boxes on the frame; plot() returns BGR, which is what VideoWriter expects
+         annotated_frame = results[0].plot()
+
+         # Write the frame to the output video
+         out.write(annotated_frame)
+
+     # Release everything when done
+     cap.release()
+     out.release()
+
+     message = "Drone detected in video!" if drone_detected else "No drones detected in video."
+     print("Video processing complete. Saved to:", output_path)
+     return output_path, message  # Return the path to the output video and the message
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("### Drone Detection System")
+     with gr.Tab("Introduction"):
+         gr.Markdown("**This application detects drones in an image, a video, or a webcam feed, depending on the tab you choose.**")
+         gr.Markdown("You don't need a drone to run this app; any drone image found online will do.\n\n**SAMPLE OUTPUT:**")
+         gr.Video("Drone Detection.mp4", width=800, height=600)
+     with gr.Tab("Upload Image"):
+         image_input = gr.Image()
+         image_output = gr.Image()
+         alert = gr.Label()
+         image_input.change(fn=predict_image, inputs=image_input, outputs=[image_output, alert])
+     with gr.Tab("Upload Video"):
+         video_input = gr.Video(sources=["upload"])
+         video_output = gr.Video(render=True)
+         alert_video = gr.Label()
+         video_input.change(fn=predict_video, inputs=video_input, outputs=[video_output, alert_video])
+     with gr.Tab("Live"):
+         gr.Markdown("Live detection will be implemented soon.")
+
+ demo.launch()
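
The Live tab is currently a placeholder. One possible wiring for it, assuming the installed Gradio version supports webcam streaming through gr.Image(streaming=True) and the .stream() event (the component names below are illustrative; predict_image is reused unchanged inside the same gr.Blocks context):

    with gr.Tab("Live"):
        # Webcam frames arrive as RGB arrays; run the image pipeline on each one
        live_input = gr.Image(sources=["webcam"], streaming=True)
        live_output = gr.Image()
        live_alert = gr.Label()
        # .stream() re-invokes predict_image on every new frame
        live_input.stream(fn=predict_image, inputs=live_input,
                          outputs=[live_output, live_alert])

Running full YOLO inference on every streamed frame can lag on CPU; lowering the frame rate or the inference image size is the usual mitigation.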