# AUTOGENERATED! DO NOT EDIT! File to edit: telecom_object_detection-Iteration_2.ipynb.

# %% auto 0
__all__ = ['title', 'css', 'urls', 'imgs', 'img_samples', 'fig2img', 'custom_vision_detect_objects', 'set_example_url',
           'set_example_image', 'detect_objects']

# %% telecom_object_detection-Iteration_2.ipynb 2
import gradio as gr
import numpy as np
import os
import io

import requests, validators

from pathlib import Path

# %% telecom_object_detection-Iteration_2.ipynb 4
############################################
### This code is based on and adapted from:
# https://github.com/MicrosoftLearning/AI-102-AIEngineer/blob/master/18-object-detection/Python/test-detector/test-detector.py
# https://huggingface.co/spaces/Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS/blob/main/app.py
############################################

# %% telecom_object_detection-Iteration_2.ipynb 7
from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
from msrest.authentication import ApiKeyCredentials
from matplotlib import pyplot as plt
from PIL import Image, ImageDraw, ImageFont
from dotenv import load_dotenv

# %% telecom_object_detection-Iteration_2.ipynb 12
def fig2img(fig):
    """Render a Matplotlib figure into an in-memory buffer and return it as a PIL Image."""
    buf = io.BytesIO()
    fig.savefig(buf)
    buf.seek(0)
    img = Image.open(buf)
    return img

def custom_vision_detect_objects(image_file: Path):
    dpi = 100

    # Get Configuration Settings
    load_dotenv()
    prediction_endpoint = os.getenv('PredictionEndpoint')
    prediction_key = os.getenv('PredictionKey')
    project_id = os.getenv('ProjectID')
    model_name = os.getenv('ModelName')
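    # The settings above come from a local .env file; a minimal sketch
    # (all values hypothetical) might look like:
    #
    #   PredictionEndpoint=https://<your-resource>.cognitiveservices.azure.com/
    #   PredictionKey=<prediction-key>
    #   ProjectID=<project-guid>
    #   ModelName=<published-iteration-name>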

    # Authenticate a client for the prediction API
    credentials = ApiKeyCredentials(in_headers={"Prediction-key": prediction_key})
    prediction_client = CustomVisionPredictionClient(endpoint=prediction_endpoint, credentials=credentials)

    # Load the image and get its height, width and channel count
    # (np.array(image).shape assumes a colour image with a channel axis)
    print('Detecting objects in', image_file)
    image = Image.open(image_file)
    h, w, ch = np.array(image).shape

    # Detect objects in the test image
    with open(image_file, mode="rb") as image_data:
        results = prediction_client.detect_image(project_id, model_name, image_data)
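    # detect_image returns an ImagePrediction; each entry in .predictions
    # carries a tag_name, a probability, and a normalised bounding_box.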
    
    # Create a figure for the results
    fig = plt.figure(figsize=(w/dpi, h/dpi))
    plt.axis('off')

    # Display the image with boxes around each detected object
    draw = ImageDraw.Draw(image)
    lineWidth = max(int(w/800), 1)  # keep at least a 1 px line for small images
    color = 'cyan'

    for prediction in results.predictions:
        # Only show objects with a > 50% probability
        if (prediction.probability*100) > 50:
            # Box coordinates and dimensions are proportional - convert to absolutes
            left = prediction.bounding_box.left * w 
            top = prediction.bounding_box.top * h 
            height = prediction.bounding_box.height * h
            width =  prediction.bounding_box.width * w
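            # For illustration: with w=800 and bounding_box.left=0.25,
            # the box starts 0.25 * 800 = 200 px from the left edge.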

            # Draw the box
            points = ((left,top), (left+width,top), (left+width,top+height), (left,top+height), (left,top))
            draw.line(points, fill=color, width=lineWidth)

            # Add the tag name and probability above the box
            # (the 1.372*h/dpi offset nudges the label clear of the box edge)
            plt.annotate(
                prediction.tag_name + ": {0:.0f}%".format(prediction.probability * 100),
                (left, top - 1.372*h/dpi),
                backgroundcolor=color,
                fontsize=max(w/dpi, h/dpi),
                fontfamily='monospace'
            )

    plt.imshow(image)
    plt.tight_layout(pad=0)
    
    return fig2img(fig)

# %% telecom_object_detection-Iteration_2.ipynb 17
title = """<h1 id="title">Telecom Object Detection with Azure Custom Vision</h1>"""

css = '''
h1#title {
  text-align: center;
}
'''

# %% telecom_object_detection-Iteration_2.ipynb 18
urls = ["https://www.dropbox.com/s/y5bk8om5ucu46d3/747.jpg?dl=1"]
imgs = [path.as_posix() for path in sorted(Path('images').rglob('*.jpg'))]
img_samples = [[img] for img in imgs]  # gr.Dataset expects one inner list per sample

# %% telecom_object_detection-Iteration_2.ipynb 21
def set_example_url(example: list) -> dict:
    # .update() returns a dict of property changes that Blocks applies to the component
    return gr.Textbox.update(value=example[0])

def set_example_image(example: list) -> dict:
    return gr.Image.update(value=example[0])

def detect_objects(url_input: str, image_input: Image):
    if validators.url(url_input):
        image = Image.open(requests.get(url_input, stream=True).raw)
    elif image_input:
        image = image_input
    else:
        raise ValueError("Provide a valid image URL or upload an image.")

    # Downscale large images so the longest side is at most 1,200 px
    w, h = image.size
    if max(w, h) > 1_200:
        factor = 1_200 / max(w, h)
        size = (int(w*factor), int(h*factor))
        image = image.resize(size, resample=Image.Resampling.BILINEAR)

    # JPEG cannot store an alpha channel, so normalise the mode before saving
    if image.mode != "RGB":
        image = image.convert("RGB")

    resized_image_path = "input_object_detection.jpg"
    image.save(resized_image_path)

    return custom_vision_detect_objects(resized_image_path)
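
# A quick smoke test outside the UI (hypothetical file path; assumes the
# .env settings described above are in place):
#
#   annotated = detect_objects("", Image.open("images/747.jpg"))
#   annotated.save("annotated.jpg")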

# %% telecom_object_detection-Iteration_2.ipynb 23
with gr.Blocks(css=css) as demo:
    
    gr.Markdown(title)
    
    with gr.Tabs():
        with gr.TabItem("Image Upload"):
            with gr.Row():
                image_input = gr.Image(type='pil')
                image_output = gr.Image(shape=(650,650))
                
            with gr.Row():
                example_images = gr.Dataset(components=[image_input], samples=img_samples)
            
            image_button = gr.Button("Detect")
        
        with gr.TabItem("Image URL"):
            with gr.Row():
                url_input = gr.Textbox(lines=2, label='Enter a valid image URL here...')
                img_output_from_url = gr.Image(shape=(650,650))
                
            with gr.Row():
                example_url = gr.Dataset(components=[url_input], samples=[[str(url)] for url in urls])
            url_button = gr.Button("Detect")
            
    # Both buttons call detect_objects, which prefers a valid URL over an
    # uploaded image, so url_input is passed to the image tab's handler too.
    url_button.click(detect_objects, inputs=[url_input, image_input], outputs=img_output_from_url)
    image_button.click(detect_objects, inputs=[url_input, image_input], outputs=image_output)
    
    example_url.click(fn=set_example_url, inputs=[example_url], outputs=[url_input])
    example_images.click(fn=set_example_image, inputs=[example_images], outputs=[image_input])

demo.launch()
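
# demo.launch() serves the app locally (Gradio's default is
# http://127.0.0.1:7860); launch(share=True) would create a temporary
# public link instead.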