import gradio as gr
import torch
from PIL import Image
import json
import numpy as np
import cv2

# Load the models
week8_model = torch.hub.load(
    './', 'custom', path='Weights/Week_8.pt', source='local')
week9_model = torch.hub.load(
    './', 'custom', path='Weights/Week_9.pt', source='local')
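
# Optionally move the models to GPU when one is available (a minimal speed-up
# sketch, not required for the demo; YOLOv5's AutoShape wrapper transfers
# inputs to the model's device automatically):
if torch.cuda.is_available():
    week8_model.to('cuda')
    week9_model.to('cuda')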


def draw_own_bbox(img, x1, y1, x2, y2, label, color=(36, 255, 12), text_color=(0, 0, 0)):
    """
    Draw bounding box on the image with text label and save both the raw and annotated image in the 'own_results' folder

    Inputs
    ------
    img: numpy.ndarray - image on which the bounding box is to be drawn

    x1: int - x coordinate of the top left corner of the bounding box

    y1: int - y coordinate of the top left corner of the bounding box

    x2: int - x coordinate of the bottom right corner of the bounding box

    y2: int - y coordinate of the bottom right corner of the bounding box

    label: str - label to be written on the bounding box

    color: tuple - color of the bounding box

    text_color: tuple - color of the text label

    Returns
    -------
    None

    """
    name_to_id = {
        "NA": 'NA',
        "Bullseye": 10,
        "One": 11,
        "Two": 12,
        "Three": 13,
        "Four": 14,
        "Five": 15,
        "Six": 16,
        "Seven": 17,
        "Eight": 18,
        "Nine": 19,
        "A": 20,
        "B": 21,
        "C": 22,
        "D": 23,
        "E": 24,
        "F": 25,
        "G": 26,
        "H": 27,
        "S": 28,
        "T": 29,
        "U": 30,
        "V": 31,
        "W": 32,
        "X": 33,
        "Y": 34,
        "Z": 35,
        "Up": 36,
        "Down": 37,
        "Right": 38,
        "Left": 39,
        "Up Arrow": 36,
        "Down Arrow": 37,
        "Right Arrow": 38,
        "Left Arrow": 39,
        "Stop": 40
    }
    # Reformat the label to {label name}-{label id}
    label = label + "-" + str(name_to_id[label])
    # Convert the coordinates to int
    x1 = int(x1)
    x2 = int(x2)
    y1 = int(y1)
    y2 = int(y2)
    # Draw the bounding box
    img = cv2.rectangle(img, (x1, y1), (x2, y2), color, 2)
    # For the text background, find space required by the text so that we can put a background with that amount of width.
    (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 1)
    # Print the text
    img = cv2.rectangle(img, (x1, y1 - 20), (x1 + w, y1), color, -1)
    img = cv2.putText(img, label, (x1, y1 - 5),
                      cv2.FONT_HERSHEY_SIMPLEX, 0.6, text_color, 1)
    return img

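# Illustrative usage of draw_own_bbox (hypothetical values: `frame` stands for
# an RGB numpy array and the coordinates for an arbitrary box):
#   annotated = draw_own_bbox(frame, 100, 50, 300, 250, "Stop")
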

def yolo(img, model, toggles, signal):
    """
    Run YOLOv5 on the image and return the results

    Inputs
    ------
    img: numpy.ndarray - image on which the YOLOv5 model is to be run
    model: str - name of the model to be used
    toggles: dict - dictionary containing the toggles for the model
    signal: str - signal for position heuristic

    Returns
    -------
    output_image: PIL.Image - image with bounding boxes drawn on it
    original_results: json - json containing the original results
    filtered_image: PIL.Image - image with bounding boxes drawn on it after filtering
    filtered_results: json - json containing the filtered results
    """
    # Select the model based on the model name
    if model == "Week 8":
        model = week8_model
    else:
        model = week9_model

    # Run the model on the image
    results = model(img)

    # Original output image and results
    original_results = json.loads(
        results.pandas().xyxy[0].to_json(orient="records"))
    output_image = Image.fromarray(results.render()[0])

    # Convert the results to a pandas dataframe and compute each bounding box's height, width and area
    df_results = results.pandas().xyxy[0]
    df_results['bboxHt'] = df_results['ymax'] - df_results['ymin']
    df_results['bboxWt'] = df_results['xmax'] - df_results['xmin']
    df_results['bboxArea'] = df_results['bboxHt'] * df_results['bboxWt']

    # Sort by bounding box area so the largest box comes first
    df_results = df_results.sort_values('bboxArea', ascending=False)

    # Filter out Bullseye
    pred_list = df_results
    if 'Ignore Bullseye' in toggles:
        pred_list = pred_list[pred_list['name'] != 'Bullseye']

    # If the filtering heuristics are toggled off, or if no predictions remain,
    # return the raw results unfiltered
    if 'Biggest BBox Only and Position-Based Heuristics' not in toggles or len(pred_list) == 0:
        return [output_image, original_results, output_image, original_results]
    # If only one prediction, no need to filter
    elif len(pred_list) == 1:
        pred = pred_list.iloc[0]
    # If more than one prediction, filter the predictions 
    else:
        pred_shortlist = []
        current_area = pred_list.iloc[0]['bboxArea']

        # For each prediction, keep it if the confidence exceeds 0.5 and the area is at least 80% of the current area (60% if the prediction is 'One')
        for _, row in pred_list.iterrows():
            if row['confidence'] > 0.5 and ((current_area * 0.8 <= row['bboxArea']) or (row['name'] == 'One' and current_area * 0.6 <= row['bboxArea'])):
                # Add the prediction to the shortlist
                pred_shortlist.append(row)
                # Update the current area to the area of the prediction
                current_area = row['bboxArea']

        # If nothing passes the confidence and area filters, fall back to the largest box
        if len(pred_shortlist) == 0:
            pred = pred_list.iloc[0]

        # If only 1 prediction remains after filtering by confidence and area
        elif len(pred_shortlist) == 1:
            # Choose that prediction
            pred = pred_shortlist[0]

        # If multiple predictions remain after filtering by confidence and area
        else:
            # Use the position signal to filter further

            # Sort the predictions by xmin
            pred_shortlist.sort(key=lambda x: x['xmin'])

            # If signal is 'Left', choose the first prediction in the list, i.e. leftmost in the image
            if signal == 'Left':
                pred = pred_shortlist[0]

            # If signal is 'Right', choose the last prediction in the list, i.e. rightmost in the image
            elif signal == 'Right':
                pred = pred_shortlist[-1]

            # Otherwise ('Center' or 'Disabled'), choose the prediction that is central in the image
            else:
                # Sentinel value, so we can detect when no central prediction is found
                pred = 'NA'
                # Loop through the predictions shortlist
                for i in range(len(pred_shortlist)):
                    # If the xmin of the prediction is between 250 and 774, i.e. the center of the image, choose that prediction
                    if pred_shortlist[i]['xmin'] > 250 and pred_shortlist[i]['xmin'] < 774:
                        pred = pred_shortlist[i]
                        break

                # If no central prediction was found, choose the one with the largest area
                if isinstance(pred, str):
                    pred_shortlist.sort(key=lambda x: x['bboxArea'])
                    pred = pred_shortlist[-1]
    # Draw the bounding box on the image 
    filtered_img = draw_own_bbox(np.array(
        img), pred['xmin'], pred['ymin'], pred['xmax'], pred['ymax'], pred['name'])
    return [output_image, original_results, filtered_img, json.loads(pred.to_json(orient="records"))]

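# Quick local sanity check for yolo() (a sketch, using one of the bundled
# example images referenced below):
#   test_img = Image.open('Examples/One.jpg')
#   raw_img, raw_json, filt_img, filt_json = yolo(
#       test_img, 'Week 8', ['Ignore Bullseye',
#                            'Biggest BBox Only and Position-Based Heuristics'], 'Center')
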
# Standalone component definitions (note: unused; the Blocks layout below
# creates its own components)
inputs = [gr.Image(type='pil', label="Original Image"),
          gr.Radio(['Week 8', 'Week 9'],
                   value='Week 8', label='Model Selection'),
          gr.CheckboxGroup(["Ignore Bullseye", "Biggest BBox Only and Position-Based Heuristics"], value=[
                           "Ignore Bullseye", "Biggest BBox Only and Position-Based Heuristics"], label="Heuristic Toggles"),
          gr.Radio(['Left', 'Center', 'Right', 'Disabled'],
                   value='Center', label='Position Heuristic'),
          ]
outputs = [gr.Image(type="pil", label="Output Image"),
           gr.JSON(label="Output JSON"),
           gr.Image(type="pil", label="Filtered Output Image"),
           gr.JSON(label="Filtered Output JSON")
           ]
# Define the examples
examples = [['Examples/One.jpg'], ['Examples/Two.jpg'], ['Examples/Three.jpg'], ['Examples/1.jpg'], ['Examples/2.jpg'], ['Examples/3.jpg'], ['Examples/4.jpg'], ['Examples/5.jpg'], ['Examples/6.jpg'],
            ['Examples/7.jpg'], ['Examples/8.jpg'], ['Examples/9.jpg'], ['Examples/10.jpg'], ['Examples/11.jpg'], ['Examples/12.jpg']]

# Define the gradio app
with gr.Blocks(css="#custom_header {min-height: 2rem; text-align: center} #custom_title {min-height: 2rem}") as demo:
    gr.Markdown("# YOLOv5 Symbol Recognition for CZ3004/SC2079 Multi-Disciplinary Project",
                elem_id="custom_header")
    gr.Markdown("Gradio Demo for YOLOv5 Symbol Recognition for CZ3004 Multi-Disciplinary Project. To use it, simply upload your image, or click one of the examples to load them. CZ3004 is a module in Nanyang Technological University's Computer Science curriculum that involves creating a robot car that can navigate within an arena and around obstacles. Part of the assessment is to go to obstacles and detect alphanumeric symbols pasted on them.", elem_id="custom_title")
    gr.Markdown("The two models available, Week 8 and Week 9, are for different subtasks. Week 8 model (as assessment was done in Week 8 of the school semester), \
                is able to detect all symbols seen in the first three example images below. Week 9 model is limited to just the bullseye, left and right arrow symbols. \
                Additionally, Week 9 model has been further trained on extreme edge cases where there is harsh sunlight behind the symbol/obstacle (seen in some of the examples).", elem_id="custom_title")
    gr.Markdown("Heuristics used are based on AY22-23 Semester 2's edition of MDP. These include ignoring the bullseye symbol, taking only the biggest bounding box, and filtering similar sized detections by the expected position of the symbol based on where the robot is supposed to be relative to the symbol.", elem_id="custom_title")
    gr.Markdown("This demo is part of a guide that is currently work-in-progress, for future CZ3004/SC2079 students to refer to. On a local environment, inference should be around 100ms at worst, and can be made faster with a GPU and/or conversion to a more optimized model format.", elem_id="custom_title")

    with gr.Row():
        with gr.Column():
            with gr.Box():
                gr.Markdown("## Inputs", elem_id="custom_header")
                input_image = gr.Image(type='pil', label="Original Image")
                btn = gr.Button(value="Submit")
                btn.style(full_width=True)
        with gr.Column():
            with gr.Box():
                gr.Markdown("## Parameters", elem_id="custom_header")
                model_selection = gr.Radio(
                    ['Week 8', 'Week 9'], value='Week 8', label='Model Selection')
                toggles = gr.CheckboxGroup(["Ignore Bullseye", "Biggest BBox Only and Position-Based Heuristics"], value=[
                                           "Ignore Bullseye", "Biggest BBox Only and Position-Based Heuristics"], label="Heuristic Toggles")
                radios = gr.Radio(['Left', 'Center', 'Right', 'Disabled'],
                                  value='Center', label='Position Heuristic')
    with gr.Row():
        with gr.Box():
            with gr.Column():
                gr.Markdown("## Raw Outputs", elem_id="custom_header")
                output_image = gr.Image(type="pil", label="Output Image")
                output_json = gr.JSON(label="Output JSON")
        with gr.Box():
            with gr.Column():
                gr.Markdown("## Filtered Outputs", elem_id="custom_header")
                filtered_image = gr.Image(
                    type="pil", label="Filtered Output Image")
                filtered_json = gr.JSON(label="Filtered Output JSON")
    with gr.Row():
        gr.Examples(examples=examples,
                    inputs=input_image,
                    outputs=output_image,
                    fn=yolo,
                    cache_examples=False)
    btn.click(yolo, inputs=[input_image, model_selection, toggles, radios], outputs=[
              output_image, output_json, filtered_image, filtered_json])
# Run the gradio app
demo.launch(debug=True)
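
# Note: debug=True blocks the process and streams logs to the console; adding
# share=True (e.g. demo.launch(debug=True, share=True)) would also create a
# temporary public link.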