Daniel Cerda Escobar committed on
Commit
f78ddd2
•
1 Parent(s): 1501d8b

Update application file

Files changed (1)
  1. app.py +245 -2
app.py CHANGED
@@ -1,5 +1,248 @@
  import streamlit as st
-
- x = st.slider('Select a value')
-
- st.write(x, 'squared is', x * x)
+ import sahi.utils.file
+ import sahi.utils.mmdet
+ from sahi import AutoDetectionModel
+ from PIL import Image
+ import random
+ from utils import sahi_mmdet_inference
+ from streamlit_image_comparison import image_comparison
+
+ MMDET_YOLOX_TINY_MODEL_URL = "https://huggingface.co/fcakyon/mmdet-yolox-tiny/resolve/main/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth"
+ MMDET_YOLOX_TINY_MODEL_PATH = "yolox.pt"
+ MMDET_YOLOX_TINY_CONFIG_URL = "https://huggingface.co/fcakyon/mmdet-yolox-tiny/raw/main/yolox_tiny_8x8_300e_coco.py"
+ MMDET_YOLOX_TINY_CONFIG_PATH = "config.py"
+
+ IMAGE_TO_URL = {
+     "apple_tree.jpg": "https://user-images.githubusercontent.com/34196005/142730935-2ace3999-a47b-49bb-83e0-2bdd509f1c90.jpg",
+     "highway.jpg": "https://user-images.githubusercontent.com/34196005/142730936-1b397756-52e5-43be-a949-42ec0134d5d8.jpg",
+     "highway2.jpg": "https://user-images.githubusercontent.com/34196005/142742871-bf485f84-0355-43a3-be86-96b44e63c3a2.jpg",
+     "highway3.jpg": "https://user-images.githubusercontent.com/34196005/142742872-1fefcc4d-d7e6-4c43-bbb7-6b5982f7e4ba.jpg",
+     "highway2-yolox.jpg": "https://user-images.githubusercontent.com/34196005/143309873-c0c1f31c-c42e-4a36-834e-da0a2336bb19.jpg",
+     "highway2-sahi.jpg": "https://user-images.githubusercontent.com/34196005/143309867-42841f5a-9181-4d22-b570-65f90f2da231.jpg",
+ }
+
+
+ @st.cache(allow_output_mutation=True, show_spinner=False)
+ def download_comparison_images():
+     sahi.utils.file.download_from_url(
+         "https://user-images.githubusercontent.com/34196005/143309873-c0c1f31c-c42e-4a36-834e-da0a2336bb19.jpg",
+         "highway2-yolox.jpg",
+     )
+     sahi.utils.file.download_from_url(
+         "https://user-images.githubusercontent.com/34196005/143309867-42841f5a-9181-4d22-b570-65f90f2da231.jpg",
+         "highway2-sahi.jpg",
+     )
+
+
+ @st.cache(allow_output_mutation=True, show_spinner=False)
+ def get_model():
+     sahi.utils.file.download_from_url(
+         MMDET_YOLOX_TINY_MODEL_URL,
+         MMDET_YOLOX_TINY_MODEL_PATH,
+     )
+     sahi.utils.file.download_from_url(
+         MMDET_YOLOX_TINY_CONFIG_URL,
+         MMDET_YOLOX_TINY_CONFIG_PATH,
+     )
+
+     detection_model = AutoDetectionModel.from_pretrained(
+         model_type='mmdet',
+         model_path=MMDET_YOLOX_TINY_MODEL_PATH,
+         config_path=MMDET_YOLOX_TINY_CONFIG_PATH,
+         confidence_threshold=0.5,
+         device="cpu",
+     )
+     return detection_model
+
+
+ class SpinnerTexts:
+     def __init__(self):
+         self.ind_history_list = []
+         self.text_list = [
+             "Meanwhile check out [MMDetection Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_mmdetection.ipynb)!",
+             "Meanwhile check out [YOLOv5 Colab notebook of SAHI](https://colab.research.google.com/github/obss/sahi/blob/main/demo/inference_for_yolov5.ipynb)!",
+             "Meanwhile check out [aerial object detection with SAHI](https://blog.ml6.eu/how-to-detect-small-objects-in-very-large-images-70234bab0f98?gi=b434299595d4)!",
+             "Meanwhile check out [COCO Utilities of SAHI](https://github.com/obss/sahi/blob/main/docs/COCO.md)!",
+             "Meanwhile check out [FiftyOne utilities of SAHI](https://github.com/obss/sahi#fiftyone-utilities)!",
+             "Meanwhile [give a Github star to SAHI](https://github.com/obss/sahi/stargazers)!",
+             "Meanwhile see [how easy it is to install SAHI](https://github.com/obss/sahi#getting-started)!",
+             "Meanwhile check out [Medium blogpost of SAHI](https://medium.com/codable/sahi-a-vision-library-for-performing-sliced-inference-on-large-images-small-objects-c8b086af3b80)!",
+             "Meanwhile try out [YOLOv5 HF Spaces demo of SAHI](https://huggingface.co/spaces/fcakyon/sahi-yolov5)!",
+         ]
+
+     def _store(self, ind):
+         # keep a short history of shown indices so texts are not repeated back to back
+         if len(self.ind_history_list) == 6:
+             self.ind_history_list.pop(0)
+         self.ind_history_list.append(ind)
+
+     def get(self):
+         ind = 0
+         while ind in self.ind_history_list:
+             ind = random.randint(0, len(self.text_list) - 1)
+         self._store(ind)
+         return self.text_list[ind]
+
+
+ st.set_page_config(
+     page_title="Small Object Detection with SAHI + YOLOX",
+     page_icon="🚀",
+     layout="centered",
+     initial_sidebar_state="auto",
+ )
+
+ download_comparison_images()
+
+ if "last_spinner_texts" not in st.session_state:
+     st.session_state["last_spinner_texts"] = SpinnerTexts()
+
+ if "output_1" not in st.session_state:
+     st.session_state["output_1"] = Image.open("highway2-yolox.jpg")
+
+ if "output_2" not in st.session_state:
+     st.session_state["output_2"] = Image.open("highway2-sahi.jpg")
+
+ st.markdown(
+     """
+     <h2 style='text-align: center'>
+     Small Object Detection <br />
+     with SAHI + YOLOX
+     </h2>
+     """,
+     unsafe_allow_html=True,
+ )
+ st.markdown(
+     """
+     <p style='text-align: center'>
+     <a href='https://github.com/obss/sahi' target='_blank'>SAHI Github</a> | <a href='https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox' target='_blank'>YOLOX Github</a> | <a href='https://huggingface.co/spaces/fcakyon/sahi-yolov5' target='_blank'>SAHI+YOLOv5 Demo</a>
+     <br />
+     Follow me for more! <a href='https://twitter.com/fcakyon' target='_blank'> <img src="https://img.icons8.com/color/48/000000/twitter--v1.png" height="30"></a><a href='https://github.com/fcakyon' target='_blank'><img src="https://img.icons8.com/fluency/48/000000/github.png" height="27"></a><a href='https://www.linkedin.com/in/fcakyon/' target='_blank'><img src="https://img.icons8.com/fluency/48/000000/linkedin.png" height="30"></a> <a href='https://fcakyon.medium.com/' target='_blank'><img src="https://img.icons8.com/ios-filled/48/000000/medium-monogram.png" height="26"></a>
+     </p>
+     """,
+     unsafe_allow_html=True,
+ )
+
+ st.write("##")
+
+ with st.expander("Usage"):
+     st.markdown(
+         """
+         <p>
+         1. Upload or select the input image 🖼️
+         <br />
+         2. (Optional) Set SAHI parameters ✔️
+         <br />
+         3. Press "🚀 Perform Prediction"
+         <br />
+         4. Enjoy sliding image comparison 🔥
+         </p>
+         """,
+         unsafe_allow_html=True,
+     )
+
+ st.write("##")
+
+ col1, col2, col3 = st.columns([6, 1, 6])
+ with col1:
+     st.markdown(f"##### Set input image:")
+
+     # set input image by upload
+     image_file = st.file_uploader(
+         "Upload an image to test:", type=["jpg", "jpeg", "png"]
+     )
+
+     # set input image from examples
+     def slider_func(option):
+         option_to_id = {
+             "apple_tree.jpg": str(1),
+             "highway.jpg": str(2),
+             "highway2.jpg": str(3),
+             "highway3.jpg": str(4),
+         }
+         return option_to_id[option]
+
+     slider = st.select_slider(
+         "Or select from example images:",
+         options=["apple_tree.jpg", "highway.jpg", "highway2.jpg", "highway3.jpg"],
+         format_func=slider_func,
+         value="highway2.jpg",
+     )
+
+     # visualize input image
+     if image_file is not None:
+         image = Image.open(image_file)
+     else:
+         image = sahi.utils.cv.read_image_as_pil(IMAGE_TO_URL[slider])
+     st.image(image, width=300)
+
+ with col3:
+     st.markdown(f"##### Set SAHI parameters:")
+
+     slice_size = st.number_input("slice_size", min_value=256, value=512, step=256)
+     overlap_ratio = st.number_input(
+         "overlap_ratio", min_value=0.0, max_value=0.6, value=0.2, step=0.2
+     )
+     postprocess_type = st.selectbox(
+         "postprocess_type", options=["NMS", "GREEDYNMM"], index=0
+     )
+     postprocess_match_metric = st.selectbox(
+         "postprocess_match_metric", options=["IOU", "IOS"], index=0
+     )
+     postprocess_match_threshold = st.number_input(
+         "postprocess_match_threshold", value=0.5, step=0.1
+     )
+     postprocess_class_agnostic = st.checkbox("postprocess_class_agnostic", value=True)
+
+ col1, col2, col3 = st.columns([4, 3, 4])
+ with col2:
+     submit = st.button("🚀 Perform Prediction")
+
+ if submit:
+     # perform prediction
+     with st.spinner(
+         text="Downloading model weight.. "
+         + st.session_state["last_spinner_texts"].get()
+     ):
+         detection_model = get_model()
+
+     image_size = 416
+
+     with st.spinner(
+         text="Performing prediction.. " + st.session_state["last_spinner_texts"].get()
+     ):
+         output_1, output_2 = sahi_mmdet_inference(
+             image,
+             detection_model,
+             image_size=image_size,
+             slice_height=slice_size,
+             slice_width=slice_size,
+             overlap_height_ratio=overlap_ratio,
+             overlap_width_ratio=overlap_ratio,
+             postprocess_type=postprocess_type,
+             postprocess_match_metric=postprocess_match_metric,
+             postprocess_match_threshold=postprocess_match_threshold,
+             postprocess_class_agnostic=postprocess_class_agnostic,
+         )
+
+     st.session_state["output_1"] = output_1
+     st.session_state["output_2"] = output_2
+
+ st.markdown(f"##### YOLOX Standard vs SAHI Prediction:")
+ static_component = image_comparison(
+     img1=st.session_state["output_1"],
+     img2=st.session_state["output_2"],
+     label1="YOLOX",
+     label2="SAHI+YOLOX",
+     width=700,
+     starting_position=50,
+     show_labels=True,
+     make_responsive=True,
+     in_memory=True,
+ )
+ st.markdown(
+     """
+     <p style='text-align: center'>
+     prepared with <a href='https://github.com/fcakyon/streamlit-image-comparison' target='_blank'>streamlit-image-comparison</a>
+     </p>
+     """,
+     unsafe_allow_html=True,
+ )
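
Note: app.py imports sahi_mmdet_inference from a local utils module that is not part of this commit. Below is a minimal, hypothetical sketch of what such a helper might look like, assuming SAHI's public get_prediction, get_sliced_prediction, and visualize_object_predictions APIs; the actual utils.py in the Space may differ, and parameter handling (for example image_size) varies across SAHI versions.

# Hypothetical sketch of utils.py (not part of this commit); assumes SAHI's
# get_prediction / get_sliced_prediction and visualize_object_predictions APIs.
import numpy as np
from PIL import Image
from sahi.predict import get_prediction, get_sliced_prediction
from sahi.utils.cv import visualize_object_predictions


def sahi_mmdet_inference(
    image,
    detection_model,
    image_size=416,  # accepted for interface compatibility; newer SAHI versions configure input size on the model
    slice_height=512,
    slice_width=512,
    overlap_height_ratio=0.2,
    overlap_width_ratio=0.2,
    postprocess_type="NMS",
    postprocess_match_metric="IOU",
    postprocess_match_threshold=0.5,
    postprocess_class_agnostic=True,
):
    # standard (full-image) YOLOX prediction
    standard_result = get_prediction(
        image=image,
        detection_model=detection_model,
    )
    standard_visual = visualize_object_predictions(
        image=np.array(image),
        object_prediction_list=standard_result.object_prediction_list,
    )
    output_1 = Image.fromarray(standard_visual["image"])

    # sliced (SAHI) prediction over overlapping tiles, merged by the chosen postprocess
    sliced_result = get_sliced_prediction(
        image=image,
        detection_model=detection_model,
        slice_height=slice_height,
        slice_width=slice_width,
        overlap_height_ratio=overlap_height_ratio,
        overlap_width_ratio=overlap_width_ratio,
        postprocess_type=postprocess_type,
        postprocess_match_metric=postprocess_match_metric,
        postprocess_match_threshold=postprocess_match_threshold,
        postprocess_class_agnostic=postprocess_class_agnostic,
    )
    sliced_visual = visualize_object_predictions(
        image=np.array(image),
        object_prediction_list=sliced_result.object_prediction_list,
    )
    output_2 = Image.fromarray(sliced_visual["image"])

    return output_1, output_2

A helper of this shape matches the keyword arguments passed at the call site in app.py above and returns the two PIL images that the app stores in session state and feeds to image_comparison.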