Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
-
# main.py
|
2 |
import importlib
|
3 |
import gradio as gr
|
|
|
|
|
4 |
|
5 |
def load_model(model_name):
|
6 |
module = importlib.import_module(model_name)
|
@@ -20,19 +21,45 @@ def detect_video(model_choice, input_video):
|
|
20 |
model = load_model(models[model_choice])
|
21 |
return model.detect_video(input_video)
|
22 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
23 |
app = gr.Blocks()
|
24 |
|
25 |
with app:
|
26 |
gr.Markdown("## Object Detection using TensorFlow Lite Models")
|
|
|
27 |
with gr.Row():
|
28 |
model_choice = gr.Dropdown(label="Select Model", choices=list(models.keys()))
|
|
|
29 |
with gr.Tab("Image Detection"):
|
30 |
-
image_input = gr.Image(type="pil", label="Upload an image")
|
31 |
image_output = gr.Image(type="pil", label="Detection Result")
|
32 |
gr.Button("Submit Image").click(fn=detect_image, inputs=[model_choice, image_input], outputs=image_output)
|
|
|
|
|
|
|
|
|
|
|
|
|
33 |
with gr.Tab("Video Detection"):
|
34 |
-
video_input = gr.Video(label="Upload a video")
|
35 |
video_output = gr.Video(label="Detection Result")
|
36 |
gr.Button("Submit Video").click(fn=detect_video, inputs=[model_choice, video_input], outputs=video_output)
|
|
|
|
|
|
|
|
|
|
|
37 |
|
38 |
app.launch(share=True)
|
|
|
|
|
1 |
import importlib
|
2 |
import gradio as gr
|
3 |
+
from PIL import Image
|
4 |
+
import os
|
5 |
|
6 |
def load_model(model_name):
|
7 |
module = importlib.import_module(model_name)
|
|
|
21 |
model = load_model(models[model_choice])
|
22 |
return model.detect_video(input_video)
|
23 |
|
24 |
+
# Sample files.
# NOTE(review): the previous hard-coded lists here
# (["sample/test.jpg"], ["sample/test2.mp4"]) were dead stores — they were
# unconditionally overwritten a few lines below by
# get_sample_image_paths() / get_sample_video_paths(), so the discovered
# lists are the single source of truth and the literals are removed.
|
28 |
+
def get_sample_image_paths(directory="sample"):
    """Return the paths of image files found directly inside *directory*.

    Args:
        directory: Folder to scan for images. Defaults to the app's bundled
            ``sample`` folder, preserving the original hard-coded behavior.

    Returns:
        list[str]: ``directory``-prefixed paths of files whose name ends in
        ``.jpg``, ``.jpeg`` or ``.png`` (case-sensitive, as before), in
        ``os.listdir`` order.

    Raises:
        FileNotFoundError: If *directory* does not exist.
    """
    image_extensions = ('.jpg', '.jpeg', '.png')
    return [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.endswith(image_extensions)
    ]
|
30 |
+
|
31 |
+
def get_sample_video_paths(directory="sample"):
    """Return the paths of video files found directly inside *directory*.

    Args:
        directory: Folder to scan for videos. Defaults to the app's bundled
            ``sample`` folder, preserving the original hard-coded behavior.

    Returns:
        list[str]: ``directory``-prefixed paths of files whose name ends in
        ``.mp4`` or ``.avi`` (case-sensitive, as before), in ``os.listdir``
        order.

    Raises:
        FileNotFoundError: If *directory* does not exist.
    """
    video_extensions = ('.mp4', '.avi')
    return [
        os.path.join(directory, name)
        for name in os.listdir(directory)
        if name.endswith(video_extensions)
    ]
|
33 |
+
|
34 |
+
# Discover the sample media shipped with the app once, at startup.
sample_images, sample_videos = get_sample_image_paths(), get_sample_video_paths()
|
36 |
+
|
37 |
# Assemble the Gradio UI: a model selector row, then one tab apiece for
# image and video detection. Each tab pairs an upload widget with a clickable
# dataset of bundled sample files that routes through the same detect_* fns.
with gr.Blocks() as app:
    gr.Markdown("## Object Detection using TensorFlow Lite Models")

    with gr.Row():
        model_choice = gr.Dropdown(label="Select Model", choices=list(models.keys()))

    with gr.Tab("Image Detection"):
        # Manual upload path.
        image_input = gr.Image(type="pil", label="Upload an image", source="upload")
        image_output = gr.Image(type="pil", label="Detection Result")
        gr.Button("Submit Image").click(fn=detect_image, inputs=[model_choice, image_input], outputs=image_output)

        # Bundled samples; clicking an entry runs detection on it.
        # NOTE(review): passing the Dataset itself as an input presumably
        # forwards the clicked sample to detect_image — verify against the
        # installed Gradio version's Dataset.click semantics.
        gr.Markdown("### Or choose a sample image")
        sample_image_dataset = gr.Dataset(components=[gr.Image(type="pil")], samples=[[Image.open(sample)] for sample in sample_images])
        sample_image_output = gr.Image(type="pil", label="Sample Detection Result")
        sample_image_dataset.click(fn=detect_image, inputs=[model_choice, sample_image_dataset], outputs=sample_image_output)

    with gr.Tab("Video Detection"):
        # Manual upload path.
        video_input = gr.Video(label="Upload a video", source="upload")
        video_output = gr.Video(label="Detection Result")
        gr.Button("Submit Video").click(fn=detect_video, inputs=[model_choice, video_input], outputs=video_output)

        # Bundled samples; clicking an entry runs detection on it.
        gr.Markdown("### Or choose a sample video")
        sample_video_dataset = gr.Dataset(components=[gr.Video()], samples=[[sample] for sample in sample_videos])
        sample_video_output = gr.Video(label="Sample Detection Result")
        sample_video_dataset.click(fn=detect_video, inputs=[model_choice, sample_video_dataset], outputs=sample_video_output)

app.launch(share=True)
|