kadirnar committed on
Commit bbe4238
1 Parent(s): 0f3aa96

Update app.py

Files changed (1)
  1. app.py +26 -315
app.py CHANGED
@@ -1,319 +1,30 @@
- import gradio as gr
- from demo import automask_image_app, automask_video_app, sahi_autoseg_app
-
-
-
- def image_app():
-     with gr.Blocks():
-         with gr.Row():
-             with gr.Column():
-                 seg_automask_image_file = gr.Image(type="filepath").style(height=260)
-                 with gr.Row():
-                     with gr.Column():
-                         seg_automask_image_model_type = gr.Dropdown(
-                             choices=[
-                                 "vit_h",
-                                 "vit_l",
-                                 "vit_b",
-                             ],
-                             value="vit_l",
-                             label="Model Type",
-                         )
-
-                         seg_automask_image_min_area = gr.Number(
-                             value=0,
-                             label="Min Area",
-                         )
-                 with gr.Row():
-                     with gr.Column():
-                         seg_automask_image_points_per_side = gr.Slider(
-                             minimum=0,
-                             maximum=32,
-                             step=2,
-                             value=16,
-                             label="Points per Side",
-                         )
-
-                         seg_automask_image_points_per_batch = gr.Slider(
-                             minimum=0,
-                             maximum=64,
-                             step=2,
-                             value=32,
-                             label="Points per Batch",
-                         )
-
-                 seg_automask_image_predict = gr.Button(value="Generator")
-
-             with gr.Column():
-                 output_image = gr.Image()
-
-         seg_automask_image_predict.click(
-             fn=automask_image_app,
-             inputs=[
-                 seg_automask_image_file,
-                 seg_automask_image_model_type,
-                 seg_automask_image_points_per_side,
-                 seg_automask_image_points_per_batch,
-                 seg_automask_image_min_area,
-             ],
-             outputs=[output_image],
-         )
-
-         gr.Examples(
-             examples=[
-                 [
-                     "testv3.jpeg",
-                     "vit_l",
-                     16,
-                     32,
-                     0,
-                 ],
-
-             ],
-             fn=automask_image_app,
-             inputs=[
-                 seg_automask_image_file,
-                 seg_automask_image_model_type,
-                 seg_automask_image_points_per_side,
-                 seg_automask_image_points_per_batch,
-                 seg_automask_image_min_area,
-             ],
-             outputs=[output_image],
-             cache_examples=True,
-         )
-
-
- def video_app():
-     with gr.Blocks():
-         with gr.Row():
-             with gr.Column():
-                 seg_automask_video_file = gr.Video().style(height=260)
-                 with gr.Row():
-                     with gr.Column():
-                         seg_automask_video_model_type = gr.Dropdown(
-                             choices=[
-                                 "vit_h",
-                                 "vit_l",
-                                 "vit_b",
-                             ],
-                             value="vit_l",
-                             label="Model Type",
-                         )
-                         seg_automask_video_min_area = gr.Number(
-                             value=1000,
-                             label="Min Area",
-                         )
-
-                 with gr.Row():
-                     with gr.Column():
-                         seg_automask_video_points_per_side = gr.Slider(
-                             minimum=0,
-                             maximum=32,
-                             step=2,
-                             value=16,
-                             label="Points per Side",
-                         )
-
-                         seg_automask_video_points_per_batch = gr.Slider(
-                             minimum=0,
-                             maximum=64,
-                             step=2,
-                             value=32,
-                             label="Points per Batch",
-                         )
-
-                 seg_automask_video_predict = gr.Button(value="Generator")
-             with gr.Column():
-                 output_video = gr.Video()

-         seg_automask_video_predict.click(
-             fn=automask_video_app,
-             inputs=[
-                 seg_automask_video_file,
-                 seg_automask_video_model_type,
-                 seg_automask_video_points_per_side,
-                 seg_automask_video_points_per_batch,
-                 seg_automask_video_min_area,
-             ],
-             outputs=[output_video],
-         )
-
-         gr.Examples(
-             examples=[
-                 [
-                     "testv2.mp4",
-                     "vit_l",
-                     16,
-                     32,
-                     0,
-                 ],
-             ],
-             fn=automask_video_app,
-             inputs=[
-                 seg_automask_video_file,
-                 seg_automask_video_model_type,
-                 seg_automask_video_points_per_side,
-                 seg_automask_video_points_per_batch,
-                 seg_automask_video_min_area,
-             ],
-             outputs=[output_video],
-             cache_examples=True,
-         )
-
-
- def sahi_app():
-     with gr.Blocks():
-         with gr.Row():
-             with gr.Column():
-                 sahi_image_file = gr.Image(type="filepath").style(height=260)
-                 sahi_autoseg_model_type = gr.Dropdown(
-                     choices=[
-                         "vit_h",
-                         "vit_l",
-                         "vit_b",
-                     ],
-                     value="vit_l",
-                     label="Sam Model Type",
-                 )
-
-                 with gr.Row():
-                     with gr.Column():
-                         sahi_model_type = gr.Dropdown(
-                             choices=[
-                                 "yolov5",
-                                 "yolov8",
-                             ],
-                             value="yolov5",
-                             label="Detector Model Type",
-                         )
-                         sahi_image_size = gr.Slider(
-                             minimum=0,
-                             maximum=1280,
-                             step=32,
-                             value=640,
-                             label="Image Size",
-                         )
-
-                         sahi_overlap_width = gr.Slider(
-                             minimum=0,
-                             maximum=1,
-                             step=0.1,
-                             value=0.2,
-                             label="Overlap Width",
-                         )
-
-                         sahi_slice_width = gr.Slider(
-                             minimum=0,
-                             maximum=640,
-                             step=32,
-                             value=256,
-                             label="Slice Width",
-                         )
-
-                 with gr.Row():
-                     with gr.Column():
-                         sahi_model_path = gr.Dropdown(
-                             choices=[
-                                 "yolov5l.pt",
-                                 "yolov5l6.pt",
-                                 "yolov8l.pt",
-                                 "yolov8x.pt",
-                             ],
-                             value="yolov5l6.pt",
-                             label="Detector Model Path",
-                         )
-
-                         sahi_conf_th = gr.Slider(
-                             minimum=0,
-                             maximum=1,
-                             step=0.1,
-                             value=0.2,
-                             label="Confidence Threshold",
-                         )
-                         sahi_overlap_height = gr.Slider(
-                             minimum=0,
-                             maximum=1,
-                             step=0.1,
-                             value=0.2,
-                             label="Overlap Height",
-                         )
-                         sahi_slice_height = gr.Slider(
-                             minimum=0,
-                             maximum=640,
-                             step=32,
-                             value=256,
-                             label="Slice Height",
-                         )
-                         sahi_image_predict = gr.Button(value="Generator")
-
-             with gr.Column():
-                 output_image = gr.Image()
-
-         sahi_image_predict.click(
-             fn=sahi_autoseg_app,
-             inputs=[
-                 sahi_image_file,
-                 sahi_autoseg_model_type,
-                 sahi_model_type,
-                 sahi_model_path,
-                 sahi_conf_th,
-                 sahi_image_size,
-                 sahi_slice_height,
-                 sahi_slice_width,
-                 sahi_overlap_height,
-                 sahi_overlap_width,
-             ],
-             outputs=[output_image],
-         )
-
-         gr.Examples(
-             examples=[
-                 [
-                     "testv1.jpg",
-                     "vit_l",
-                     "yolov5",
-                     "yolov5l6.pt",
-                     0.2,
-                     1280,
-                     256,
-                     256,
-                     0.2,
-                     0.2,
-                 ],
-             ],
-             fn=sahi_autoseg_app,
-             inputs=[
-                 sahi_image_file,
-                 sahi_autoseg_model_type,
-                 sahi_model_type,
-                 sahi_model_path,
-                 sahi_conf_th,
-                 sahi_image_size,
-                 sahi_slice_height,
-                 sahi_slice_width,
-                 sahi_overlap_height,
-                 sahi_overlap_width,
-             ],
-             outputs=[output_image],
-             cache_examples=True,
-         )
-
-
- def metaseg_app():
-     app = gr.Blocks()
-     with app:
-         with gr.Row():
-             with gr.Column():
-                 with gr.Tab("Image"):
-                     image_app()
-                 with gr.Tab("Video"):
-                     video_app()
-                 with gr.Tab("SAHI"):
-                     sahi_app()

-     app.queue(concurrency_count=1)
-     app.launch(debug=True, enable_queue=True)


- if __name__ == "__main__":
-     metaseg_app()
+ from whisperplus.app import youtube_url_to_text_app, speaker_diarization_app

+ import gradio as gr



+ gradio_app = gr.Blocks()
+ with gradio_app:
+     gr.HTML(
+         """
+         <h1 style='text-align: center'>
+         WhisperPlus: Advancing Speech-to-Text Processing 🚀
+         </h1>
+         """)
+     gr.HTML(
+         """
+         <h3 style='text-align: center'>
+         Follow me for more!
+         <a href='https://twitter.com/kadirnar_ai' target='_blank'>Twitter</a> | <a href='https://github.com/kadirnar' target='_blank'>Github</a> | <a href='https://www.linkedin.com/in/kadir-nar/' target='_blank'>Linkedin</a> | <a href='https://www.huggingface.co/kadirnar/' target='_blank'>HuggingFace</a>
+         </h3>
+         """)
+     with gr.Row():
+         with gr.Column():
+             with gr.Tab(label="Youtube URL to Text"):
+                 youtube_url_to_text_app()
+             with gr.Tab(label="Speaker Diarization"):
+                 speaker_diarization_app()
+
+ gradio_app.queue()
+ gradio_app.launch(debug=True)
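
For orientation, the two imported tab builders are expected to follow the same pattern as the removed MetaSeg tabs above: each function lays out its own inputs, outputs, and click handler inside the caller's gr.Blocks context. The sketch below is only a hypothetical illustration of that pattern, not the actual whisperplus.app implementation, and the transcribe_youtube_url helper is invented for the example.

    import gradio as gr

    def transcribe_youtube_url(url: str) -> str:
        # Placeholder for the real WhisperPlus pipeline (download audio, run Whisper);
        # invented for this sketch, not part of the commit.
        raise NotImplementedError

    def youtube_url_to_text_app():
        # Hypothetical tab builder in the style of the removed MetaSeg tabs;
        # it assumes it is called inside an enclosing gr.Blocks context.
        with gr.Row():
            with gr.Column():
                url_input = gr.Textbox(label="YouTube URL")
                transcribe_button = gr.Button(value="Transcribe")
            with gr.Column():
                transcript_output = gr.Textbox(label="Transcript")

        transcribe_button.click(
            fn=transcribe_youtube_url,
            inputs=[url_input],
            outputs=[transcript_output],
        )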