Spaces:
Build error
pirahansiah committed
Commit ce3199d • 1 parent: 9961bfb
multi object tracking
Browse files
- .DS_Store +0 -0
- .gitattributes +2 -0
- app.py +3 -2
- files/a.MOV +3 -0
- files/a.mp4 +3 -0
- files/b.mov +3 -0
- requirements.txt +2 -1
- src/__pycache__/video_processing.cpython-310.pyc +0 -0
- src/test.py +43 -5
- src/test2.py +1 -0
- src/video_processing.py +25 -0
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
.gitattributes
CHANGED
@@ -35,3 +35,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
 files/b.png filter=lfs diff=lfs merge=lfs -text
 *.png filter=lfs diff=lfs merge=lfs -text
+*.MOV filter=lfs diff=lfs merge=lfs -text
+*.mp4 filter=lfs diff=lfs merge=lfs -text
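Note: the two added patterns are what git lfs track "*.MOV" and git lfs track "*.mp4" would append; with them in place, the video binaries committed below are stored as LFS pointer files rather than raw blobs.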
app.py
CHANGED
@@ -6,6 +6,7 @@ Sometimes you just want to take an image from your database and see how it chang
 import gradio as gr
 from src.threshold_methods import threshold_methods
 from src.yolo import yolo
+from src.video_processing import video_processing
 import cv2
 
 new_outputs = [
@@ -36,7 +37,7 @@ HuggingFace = gr.Interface(
 )
 
 gr.TabbedInterface(
-    [HuggingFace,threshold_methods,yolo],
-    tab_names=['HuggingFace','Thresholding Image Segmentation','YOLO']
+    [HuggingFace,threshold_methods,yolo,video_processing],
+    tab_names=['HuggingFace','Thresholding Image Segmentation','YOLO','video_processing']
 ).queue().launch()
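Note: this registers the new video_processing interface as a fourth tab. gr.TabbedInterface pairs the interface list with tab_names positionally, and the existing .queue() call is what allows the generator-based tracking tab (added below in src/video_processing.py) to stream frames.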
files/a.MOV
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5dddb5efbd2e9dd31904e9004a15de3a47e64efd6e5a164d7e7f3a66ec581633
+size 26720050
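Note: this and the two video files below are Git LFS pointer files; only the spec version, the sha256 oid, and the size in bytes are committed, while the binary itself lives in LFS storage.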
files/a.mp4
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92385551e0b46905515c6d06e61c1c727eebbd5ca48f8c05ac1f2f23c4cf83ed
+size 20323167
files/b.mov
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afe123619d4402adf016dc46838f66998d3971b8bf693af75fa48619ced2d65a
+size 4966448
requirements.txt
CHANGED
@@ -4,7 +4,8 @@ requests
 hydra-core
 ultralytics
 gradio
-
+ffmpeg
+ffmpeg-python
 
 # # Ultralytics requirements
 # # Usage: pip install -r requirements.txt
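Note: the PyPI package named ffmpeg does not ship the FFmpeg binary, and ffmpeg-python is only a wrapper that shells out to a system-level ffmpeg executable (on Spaces that is normally installed through a packages.txt apt file). A minimal sketch, assuming ffmpeg is expected on PATH, for failing fast instead of at the first conversion:

import shutil

# ffmpeg-python invokes the ffmpeg executable as a subprocess,
# so check that it is actually installed before any conversion runs.
if shutil.which("ffmpeg") is None:
    raise RuntimeError(
        "ffmpeg binary not found; install it system-wide "
        "(e.g. via packages.txt on Spaces), not only via pip"
    )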
src/__pycache__/video_processing.cpython-310.pyc
ADDED
Binary file (1.03 kB)
src/test.py
CHANGED
@@ -1,6 +1,12 @@
 from ultralytics import YOLO
 from PIL import Image
 import cv2
+import ffmpeg
+ffmpeg.input('files/a.MOV').output('files/a.mp4').run()
+ffmpeg.input('input.mov').output('output.mp4').run()
+
+
+
 def draw_boxes(image, boxes):
     for box in boxes:
@@ -24,9 +30,41 @@ def detect_objects_on_image(buf):
     ])
     return output
 
-
-
-
-cv2.imshow("test",img_with_boxes)
-cv2.waitKey(0)
+# model = MaskRCNN("mask_rcnn_model.pth")
+# results = model.predict(img)
+# masks = results['masks']
+
+# img = cv2.imread('a.png')
+# boxes=detect_objects_on_image(img)
+# img_with_boxes = draw_boxes(img, boxes)
+# cv2.imshow("test",img_with_boxes)
+# cv2.waitKey(0)
+model = YOLO("files/yolov8n.pt")
+video_path = "files/a.MOV"
+cap = cv2.VideoCapture(video_path)
+
+# Loop through the video frames
+while cap.isOpened():
+    # Read a frame from the video
+    success, frame = cap.read()
+
+    if success:
+        # Run YOLOv8 tracking on the frame, persisting tracks between frames
+        results = model.track(frame, persist=True)
+
+        # Visualize the results on the frame
+        annotated_frame = results[0].plot()
+
+        # Display the annotated frame
+        cv2.imshow("YOLOv8 Tracking", annotated_frame)
+
+        # Break the loop if 'q' is pressed
+        if cv2.waitKey(1) & 0xFF == ord("q"):
+            break
+    else:
+        # Break the loop if the end of the video is reached
+        break
 
+# Release the video capture object and close the display window
+cap.release()
+cv2.destroyAllWindows()
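Note: two fragile spots in this script. The ffmpeg.input(...).run() calls execute at import time, re-encode on every run, and the second one references input.mov, which does not exist in the repo; cv2.imshow also needs a display that a headless Space lacks. A minimal sketch of a guarded conversion, assuming the same ffmpeg-python API (convert_once is a hypothetical helper, not part of the commit):

import os
import ffmpeg  # ffmpeg-python wrapper; needs the ffmpeg binary on PATH

def convert_once(src: str, dst: str) -> str:
    # Re-encode only when the target file is missing, so importing
    # or re-running the script does not repeat the conversion.
    if not os.path.exists(dst):
        ffmpeg.input(src).output(dst).run()
    return dst

if __name__ == "__main__":
    convert_once("files/a.MOV", "files/a.mp4")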
src/test2.py
ADDED
@@ -0,0 +1 @@
+# www.pirahansiah.com
src/video_processing.py
ADDED
@@ -0,0 +1,25 @@
+import cv2
+import numpy as np
+import gradio as gr
+from ultralytics import YOLO
+video_path = [['files/a.mp4']]
+inputs = [gr.components.Video(type="filepath", label="Input ")]
+outputs = [gr.components.Image(type="numpy", label="Output ")]
+model = YOLO("files/yolov8n.pt")
+def show_preds_video(video_file):
+    cap = cv2.VideoCapture(video_file)
+    while cap.isOpened():
+        success, frame = cap.read()
+        if success:
+            results = model.track(frame, persist=True)
+            annotated_frame = results[0].plot()
+            yield cv2.cvtColor(annotated_frame, cv2.COLOR_BGR2RGB)
+
+video_processing = gr.Interface(
+    fn=show_preds_video,
+    inputs=inputs,
+    outputs=outputs,
+    examples=video_path,
+    cache_examples=False,
+    title="Video Multi Objects tracking",
+)
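Note: show_preds_video streams annotated frames to the Image output as a generator, but it never releases the capture handle and tracks every frame, which is slow on CPU hardware. A hedged variant under the same model and imports (the stride parameter is an addition for frame sampling, not part of the commit):

def show_preds_video(video_file, stride: int = 1):
    cap = cv2.VideoCapture(video_file)
    try:
        idx = 0
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                break  # end of video
            if idx % stride == 0:
                # persist=True keeps YOLOv8 track IDs stable across frames
                results = model.track(frame, persist=True)
                yield cv2.cvtColor(results[0].plot(), cv2.COLOR_BGR2RGB)
            idx += 1
    finally:
        cap.release()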