apailang committed
Commit: db02e43
Parent: 6b64a22

Update app.py

Files changed (1): app.py (+8 -6)
app.py CHANGED
@@ -40,7 +40,9 @@ def load_model(model_repo_id):
 def predict(pilimg,Threshold):
 
     image_np = pil_image_as_numpy_array(pilimg)
-    #Thresholdf=float(Threshold)
+    if Threshold is None:
+        Threshold = 0.38
+
     return predict2(image_np),predict3(image_np),Threshold
 
 def predict2(image_np):
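Editor's note on the new default, as a hedged sketch rather than part of the commit: gr.Textbox hands its value to the function as a string, so the None guard only covers the case where no value arrives at all, and the commented-out Thresholdf=float(Threshold) line hints at the parse still to come. A safer version would also handle empty or non-numeric text; parse_threshold is a hypothetical helper name, not in app.py.

# Editor's sketch, assuming the threshold keeps arriving as Textbox text;
# parse_threshold is a hypothetical helper, not in the commit.
def parse_threshold(raw, default=0.38):
    """Parse the Textbox value, falling back to the default on bad input."""
    try:
        value = float(raw)
    except (TypeError, ValueError):  # None, "", or non-numeric text
        return default
    return min(max(value, 0.0), 1.0)  # clamp to a valid confidence range

Dropping a helper like this into predict would make the WIP threshold safe to wire into the detection calls later.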
@@ -170,13 +172,13 @@ test12 = os.path.join(os.path.dirname(__file__), "data/test12.jpeg")
 base_image = gr.Interface(
     fn=predict,
     # inputs=[gr.Image(type="pil"),gr.Slider(minimum=0.01, maximum=1, value=0.38, label="Threshold", info="[not in use] to set prediction confidence threshold")],
-    inputs=[gr.Image(type="pil"),gr.Textbox(value=0.38, label="Threshold", info="to set prediction confidence threshold")],
+    inputs=[gr.Image(type="pil"),gr.Textbox(value=0.38, label="Change default 0.38 Threshold", info="to set prediction confidence threshold")],
 
-    outputs=[gr.Image(type="pil",label="Base Model Inference"),gr.Image(type="pil",label="Tuned Model Inference"),gr.Textbox()],
+    outputs=[gr.Image(type="pil",label="Base Model Inference"),gr.Image(type="pil",label="Tuned Model Inference"),gr.Textbox(label="Threshold")],
     title="Luffy and Chopper Head detection. SSD MobileNet V2 320x320",
     description="Upload an image for prediction or click one of the examples below. Predictions with confidence >38% are shown in the detected images. Threshold slider is WIP.",
-    examples=[[test1],[test2],[test3],[test4],[test5],[test6],[test7],[test8],[test9],[test10],[test11],[test12]],
-    cache_examples=True
+    examples=[[test1,0.38],[test2,0.38],[test3,0.38],[test4,0.38],[test5,0.38],[test6,0.38],[test7,0.38],[test8,0.38],[test9,0.38],[test10,0.38],[test11,0.38],[test12,0.38]],
+    cache_examples=True, examples_per_page=12
 )#.launch(share=True)
 
 # tuned_image = gr.Interface(
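Editor's note on why every example row gained a second value: gr.Interface requires each example row to supply one entry per input component, so once the Textbox joins the image input, [test1] must become [test1, 0.38]; cache_examples=True precomputes outputs for every row, and examples_per_page=12 keeps all twelve rows on one page. A sketch of generating the rows instead of writing them out, reusing the test1..test12 paths already defined in app.py:

# Editor's sketch (not in the commit): build the example rows from the
# existing test image paths so the default threshold lives in one place.
DEFAULT_THRESHOLD = 0.38
example_images = [test1, test2, test3, test4, test5, test6,
                  test7, test8, test9, test10, test11, test12]
examples = [[img, DEFAULT_THRESHOLD] for img in example_images]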
@@ -210,7 +212,7 @@ base_image = gr.Interface(
 
 video = gr.Interface(
     fn=lambda x,y: [x,y], #video_demo,
-    inputs=[gr.Video(label="Base Model Video",interactive=False),gr.Video(label="Tuned Model Video",interactive=False)],
+    inputs=[gr.Video(label="Base Model Video",interactive=False,visible=False),gr.Video(label="Tuned Model Video",interactive=False,visible=False)],
     outputs=[gr.Video(label="Base Model Inferenced Video"), gr.Video(label="Tuned Model Inferenced Video")],
     examples=[
         [a, b]
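Editor's note on the video tab's pattern, spelled out under the assumption that a and b are paths to videos already run through the models offline: with both gr.Video inputs now visible=False as well as interactive=False, clicking the example row is effectively the only way to feed them, and the lambda does no inference at all; it simply forwards the two precomputed files to the output players. The same wiring in named form:

# Editor's sketch of the same wiring; a and b are assumed to be paths
# to videos that were already inferenced ahead of time.
def passthrough(base_video, tuned_video):
    # No inference here; the files were rendered offline.
    return base_video, tuned_video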
 