fmussari committed on
Commit
18341d9
1 Parent(s): 9e42250

Added URL Functionality

Browse files
Files changed (5) hide show
  1. .gitignore +1 -1
  2. app.py +32 -24
  3. input_object_detection.jpg +0 -0
  4. output.jpg +0 -0
  5. requirements.txt +2 -1
.gitignore CHANGED
@@ -1,5 +1,5 @@
1
  .ipynb_checkpoints
2
  flagged
3
- telecom_object_detection.ipynb
4
  .env
5
  *.png
 
1
  .ipynb_checkpoints
2
  flagged
3
+ *.ipynb
4
  .env
5
  *.png
app.py CHANGED
@@ -1,27 +1,34 @@
1
- # AUTOGENERATED! DO NOT EDIT! File to edit: telecom_object_detection.ipynb.
2
 
3
  # %% auto 0
4
  __all__ = ['title', 'css', 'urls', 'imgs', 'img_samples', 'fig2img', 'custom_vision_detect_objects', 'set_example_url',
5
  'set_example_image', 'detect_objects']
6
 
7
- # %% telecom_object_detection.ipynb 2
8
  import gradio as gr
9
  import numpy as np
10
  import os
11
  import io
12
 
13
- import requests
14
 
15
  from pathlib import Path
16
 
17
- # %% telecom_object_detection.ipynb 6
 
 
 
 
 
 
 
18
  from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
19
  from msrest.authentication import ApiKeyCredentials
20
  from matplotlib import pyplot as plt
21
  from PIL import Image, ImageDraw, ImageFont
22
  from dotenv import load_dotenv
23
 
24
- # %% telecom_object_detection.ipynb 11
25
  def fig2img(fig):
26
  buf = io.BytesIO()
27
  fig.savefig(buf)
@@ -94,7 +101,7 @@ def custom_vision_detect_objects(image_file: Path):
94
  fig.savefig(outputfile)
95
  print('Resulabsts saved in ', outputfile)
96
 
97
- # %% telecom_object_detection.ipynb 15
98
  title = """<h1 id="title">Telecom Object Detection with Azure Custom Vision</h1>"""
99
 
100
  css = '''
@@ -103,41 +110,42 @@ h1#title {
103
  }
104
  '''
105
 
106
- # %% telecom_object_detection.ipynb 16
107
- urls = ["https://c8.alamy.com/comp/J2AB4K/the-new-york-stock-exchange-on-the-wall-street-in-new-york-J2AB4K.jpg"]
108
  imgs = [path.as_posix() for path in sorted(Path('images').rglob('*.jpg'))]
109
  img_samples = [[path.as_posix()] for path in sorted(Path('images').rglob('*.jpg'))]
110
 
111
- # %% telecom_object_detection.ipynb 17
112
  def set_example_url(example: list) -> dict:
 
113
  return gr.Textbox.update(value=example[0])
114
 
115
  def set_example_image(example: list) -> dict:
116
  return gr.Image.update(value=example[0])
117
 
118
- def detect_objects(image_input:Image):
119
- #if validators.url(url_input):
120
- # image = Image.open(requests.get(url_input, stream=True).raw)
121
- #elif image_input:
122
- # image = image_input
123
- print(image_input)
124
- print(image_input.size)
125
- w, h = image_input.size
 
 
126
 
127
  if max(w, h) > 1_200:
128
  factor = 1_200 / max(w, h)
129
  factor = 1
130
  size = (int(w*factor), int(h*factor))
131
- image_input = image_input.resize(size, resample=Image.Resampling.BILINEAR)
132
 
133
  resized_image_path = "input_object_detection.jpg"
134
- image_input.save(resized_image_path)
135
 
136
- #return image_input
137
- #return custom_vision_detect_objects(Path(filename[0]))
138
  return custom_vision_detect_objects(resized_image_path)
139
 
140
- # %% telecom_object_detection.ipynb 19
141
  with gr.Blocks(css=css) as demo:
142
 
143
  gr.Markdown(title)
@@ -166,8 +174,8 @@ with gr.Blocks(css=css) as demo:
166
  example_url = gr.Dataset(components=[url_input], samples=[[str(url)] for url in urls])
167
  url_button = gr.Button("Detect")
168
 
169
- url_button.click(detect_objects, inputs=[url_input], outputs=img_output_from_url)
170
- image_button.click(detect_objects, inputs=[image_input], outputs=image_output)
171
  #image_button.click(detect_objects, inputs=[example_images], outputs=image_output)
172
 
173
  example_url.click(fn=set_example_url, inputs=[example_url], outputs=[url_input])
 
1
+ # AUTOGENERATED! DO NOT EDIT! File to edit: telecom_object_detection-Iteration_2.ipynb.
2
 
3
  # %% auto 0
4
  __all__ = ['title', 'css', 'urls', 'imgs', 'img_samples', 'fig2img', 'custom_vision_detect_objects', 'set_example_url',
5
  'set_example_image', 'detect_objects']
6
 
7
+ # %% telecom_object_detection-Iteration_2.ipynb 2
8
  import gradio as gr
9
  import numpy as np
10
  import os
11
  import io
12
 
13
+ import requests, validators
14
 
15
  from pathlib import Path
16
 
17
+ # %% telecom_object_detection-Iteration_2.ipynb 4
18
+ ############################################
19
+ ### This code is based and adapted from:
20
+ # https://github.com/MicrosoftLearning/AI-102-AIEngineer/blob/master/18-object-detection/Python/test-detector/test-detector.py
21
+ # https://huggingface.co/spaces/Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS/blob/main/app.py
22
+ ############################################
23
+
24
+ # %% telecom_object_detection-Iteration_2.ipynb 7
25
  from azure.cognitiveservices.vision.customvision.prediction import CustomVisionPredictionClient
26
  from msrest.authentication import ApiKeyCredentials
27
  from matplotlib import pyplot as plt
28
  from PIL import Image, ImageDraw, ImageFont
29
  from dotenv import load_dotenv
30
 
31
+ # %% telecom_object_detection-Iteration_2.ipynb 12
32
  def fig2img(fig):
33
  buf = io.BytesIO()
34
  fig.savefig(buf)
 
101
  fig.savefig(outputfile)
102
  print('Resulabsts saved in ', outputfile)
103
 
104
+ # %% telecom_object_detection-Iteration_2.ipynb 17
105
  title = """<h1 id="title">Telecom Object Detection with Azure Custom Vision</h1>"""
106
 
107
  css = '''
 
110
  }
111
  '''
112
 
113
+ # %% telecom_object_detection-Iteration_2.ipynb 18
114
+ urls = ["https://www.dropbox.com/s/y5bk8om5ucu46d3/747.jpg?dl=1"]
115
  imgs = [path.as_posix() for path in sorted(Path('images').rglob('*.jpg'))]
116
  img_samples = [[path.as_posix()] for path in sorted(Path('images').rglob('*.jpg'))]
117
 
118
+ # %% telecom_object_detection-Iteration_2.ipynb 21
119
def set_example_url(example: list) -> dict:
    """Fill the URL textbox with the clicked sample from the `example_url` Dataset.

    Gradio passes the clicked Dataset row as a one-element list; its first
    item is the sample URL string.
    """
    # Fix: the original also printed gr.Textbox.update(...) — a leftover
    # debug statement that built the update dict a second time just to
    # write it to stdout. Removed.
    return gr.Textbox.update(value=example[0])
122
 
123
def set_example_image(example: list) -> dict:
    """Populate the image input with the clicked gallery sample.

    The Dataset click handler receives the selected row as a one-element
    list whose first item is the image path.
    """
    selected_path = example[0]
    return gr.Image.update(value=selected_path)
125
 
126
def detect_objects(url_input: str, image_input: Image):
    """Run Azure Custom Vision object detection on a URL or an uploaded image.

    A valid URL takes precedence; otherwise the uploaded image is used.
    The chosen image is downscaled so its longest side is at most 1200 px,
    saved to a temporary JPEG, and handed to custom_vision_detect_objects.

    Raises:
        ValueError: if neither a valid URL nor an uploaded image is given
            (the original code fell through with `image` unbound, crashing
            with a NameError instead).
    """
    if validators.url(url_input):
        # stream=True + .raw lets PIL read the response body directly;
        # a timeout keeps a dead URL from hanging the Gradio worker.
        image = Image.open(requests.get(url_input, stream=True, timeout=30).raw)
    elif image_input:
        image = image_input
    else:
        raise ValueError("Provide either a valid image URL or an uploaded image.")

    w, h = image.size
    if max(w, h) > 1_200:
        # Fix: the original computed this factor and then immediately
        # overwrote it with `factor = 1`, so oversized images were never
        # actually downscaled. The debug override is removed and the
        # resize is done only when the image exceeds the limit.
        factor = 1_200 / max(w, h)
        size = (int(w * factor), int(h * factor))
        image = image.resize(size, resample=Image.Resampling.BILINEAR)

    # The prediction helper takes a file path, so persist the (possibly
    # resized) image to disk first.
    resized_image_path = "input_object_detection.jpg"
    image.save(resized_image_path)

    return custom_vision_detect_objects(resized_image_path)
147
 
148
+ # %% telecom_object_detection-Iteration_2.ipynb 23
149
  with gr.Blocks(css=css) as demo:
150
 
151
  gr.Markdown(title)
 
174
  example_url = gr.Dataset(components=[url_input], samples=[[str(url)] for url in urls])
175
  url_button = gr.Button("Detect")
176
 
177
+ url_button.click(detect_objects, inputs=[url_input,image_input], outputs=img_output_from_url)
178
+ image_button.click(detect_objects, inputs=[url_input,image_input], outputs=image_output)
179
  #image_button.click(detect_objects, inputs=[example_images], outputs=image_output)
180
 
181
  example_url.click(fn=set_example_url, inputs=[example_url], outputs=[url_input])
input_object_detection.jpg ADDED
output.jpg CHANGED
requirements.txt CHANGED
@@ -1,2 +1,3 @@
1
  azure-cognitiveservices-vision-customvision==3.1.0
2
- python-dotenv==0.20.0
 
 
1
  azure-cognitiveservices-vision-customvision==3.1.0
2
+ python-dotenv==0.20.0
3
+ validators