Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -175,12 +175,12 @@ base_image = gr.Interface(
|
|
175 |
# inputs=[gr.Image(type="pil"),gr.Slider(minimum=0.01, maximum=1, value=0.38 ,label="Threshold",info="[not in used]to set prediction confidence threshold")],
|
176 |
inputs=[gr.Image(type="pil"),gr.Slider(minimum=0.01, maximum=1,value=threshold_d ,label="[WIP] To change the default 0.38 prediction confidence threshold",info="[not in use] Select an image with the 0.38 threshold to start; you may amend the threshold after the first image inference")],
|
177 |
|
178 |
-
outputs=[gr.Image(type="pil",label="Base Model Inference"),gr.Image(type="pil",label="Tuned Model Inference"),gr.Textbox(label="Both images inferenced threshold")],
|
179 |
title="Luffy and Chopper Head detection. SSD mobile net V2 320x320",
|
180 |
description="Upload an image for prediction or click on the examples below. Predictions with confidence >38% will be shown in the detected images. Threshold slider is WIP",
|
181 |
examples=
|
182 |
[[test1],[test2],[test3],[test4],[test5],[test6],[test7],[test8],[test9],[test10],[test11],[test12],],
|
183 |
-
cache_examples=True,examples_per_page=12#,label="select image with 0.38 threshold to inference, you may amend threshold after inference"
|
184 |
)
|
185 |
|
186 |
# tuned_image = gr.Interface(
|
@@ -214,7 +214,7 @@ base_image = gr.Interface(
|
|
214 |
|
215 |
video = gr.Interface(
|
216 |
fn=lambda x,y: [x,y], #video_demo,
|
217 |
-
inputs=[gr.Video(label="Base Model Video",interactive=False
|
218 |
outputs=[gr.Video(label="Base Model Inferenced Video"), gr.Video(label="Tuned Model Inferenced Video")],
|
219 |
examples=[
|
220 |
[a, b]
|
|
|
175 |
# inputs=[gr.Image(type="pil"),gr.Slider(minimum=0.01, maximum=1, value=0.38 ,label="Threshold",info="[not in used]to set prediction confidence threshold")],
|
176 |
inputs=[gr.Image(type="pil"),gr.Slider(minimum=0.01, maximum=1,value=threshold_d ,label="[WIP] To change the default 0.38 prediction confidence threshold",info="[not in use] Select an image with the 0.38 threshold to start; you may amend the threshold after the first image inference")],
|
177 |
|
178 |
+
outputs=[gr.Image(type="pil",label="Base Model Inference"),gr.Image(type="pil",label="Tuned Model Inference"),gr.Textbox(label="[WIP]Both images inferenced threshold")],
|
179 |
title="Luffy and Chopper Head detection. SSD mobile net V2 320x320",
|
180 |
description="Upload an image for prediction or click on the examples below. Predictions with confidence >38% will be shown in the detected images. Threshold slider is WIP",
|
181 |
examples=
|
182 |
[[test1],[test2],[test3],[test4],[test5],[test6],[test7],[test8],[test9],[test10],[test11],[test12],],
|
183 |
+
cache_examples=True,examples_per_page=12 #,label="select image with 0.38 threshold to inference, you may amend threshold after inference"
|
184 |
)
|
185 |
|
186 |
# tuned_image = gr.Interface(
|
|
|
214 |
|
215 |
video = gr.Interface(
|
216 |
fn=lambda x,y: [x,y], #video_demo,
|
217 |
+
inputs=[gr.Video(label="Base Model Video",interactive=False),gr.Video(label="Tuned Model Video",interactive=False)],
|
218 |
outputs=[gr.Video(label="Base Model Inferenced Video"), gr.Video(label="Tuned Model Inferenced Video")],
|
219 |
examples=[
|
220 |
[a, b]
|