import gradio as gr
import cv2
import codecs
import os
from PIL import Image
from ultralytics import YOLO


################## MODEL ##################
# YOLO detector weights trained for the RSUD20K road-scene benchmark.
model = YOLO('best.pt')
title = "RSUD20K: A Dataset for Road Scene Understanding In Autonomous Driving"
# Read the HTML description with an explicit encoding; the context manager
# closes the handle (the previous codecs.open(...).read() leaked it).
with open("description.html", "r", encoding="utf-8") as _description_file:
    description = _description_file.read()

################## IMAGE ##################
# Directory holding the example .jpg images listed in the Gradio UI.
Image_directory = "examples/images"

# Gradio hands the uploaded image to the callback as a file path.
inputs_image = [
    gr.components.Image(type="filepath", label="Input Image"),
]
# The callback returns an annotated frame rendered as a numpy image.
outputs_image = [
    gr.components.Image(type="numpy", label="Output Image"),
]

def show_preds_image(image_path):
    """Run YOLO detection on a single image and return the annotated frame.

    Parameters
    ----------
    image_path : str
        Path to the input image on disk (the Image component uses
        type="filepath").

    Returns
    -------
    PIL.Image.Image
        The prediction plot, converted from OpenCV's BGR order to RGB.
    """
    # model.predict reads the image from the path itself, so the original
    # cv2.imread call was dead code and has been removed.
    outputs = model.predict(source=image_path)
    # outputs[0].plot() returns a BGR ndarray; reverse the channel axis
    # to get RGB before wrapping it in a PIL image.
    return Image.fromarray(outputs[0].plot()[:, :, ::-1])

    
# Image-inference tab: one uploaded image in, one annotated image out.
_image_examples = [
    os.path.join(Image_directory, filename)
    for filename in os.listdir(Image_directory)
    if filename.endswith(".jpg")
]
demo_image = gr.Interface(
    show_preds_image,
    inputs_image,
    outputs_image,
    title=title,
    description=description,
    examples=_image_examples,
    allow_flagging="never",
    analytics_enabled=False,
)

################## VIDEO ##################

# Directory holding the example .mp4 clips listed in the Gradio UI.
Video_directory = "examples/videos"

# One video upload in; annotated frames stream out one at a time, so the
# output component is an Image even though the input is a Video.
inputs_video = [gr.components.Video(label="Input Video")]

outputs_video = [gr.components.Image(type="numpy", label="Output Video")]


def show_preds_video(video_path):
    """Stream YOLO detections for every frame of a video.

    Parameters
    ----------
    video_path : str
        Path to the video file supplied by the Gradio Video component.

    Yields
    ------
    PIL.Image.Image
        Each annotated frame, converted from OpenCV's BGR order to RGB.
    """
    cap = cv2.VideoCapture(video_path)
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                # End of stream (or a read error): stop yielding.
                break
            outputs = model.predict(source=frame)
            # plot() returns BGR; reverse the channel axis for RGB.
            yield Image.fromarray(outputs[0].plot()[:, :, ::-1])
    finally:
        # Release the capture even if prediction raises or the client
        # disconnects mid-stream. The original also called
        # cv2.destroyAllWindows(), but no GUI windows are ever opened
        # here, so that call was unnecessary. The unused frame_copy and
        # predicted_frames variables have been removed as dead code.
        cap.release()

# Video-inference tab: streams annotated frames from show_preds_video.
_video_examples = [
    os.path.join(Video_directory, filename)
    for filename in os.listdir(Video_directory)
    if filename.endswith(".mp4")
]
demo_video = gr.Interface(
    show_preds_video,
    inputs_video,
    outputs_video,
    title=title,
    description=description,
    examples=_video_examples,
    allow_flagging="never",
    analytics_enabled=False,
)

################## LAUNCH ##################
# Combine both demos into one tabbed app and serve it with request
# queuing enabled (queuing is what lets the video generator stream).
app = gr.TabbedInterface(
    [demo_image, demo_video],
    tab_names=['Image inference', 'Video inference'],
)
app.queue().launch()