dreamdrop-art committed on
Commit
7494c56
1 Parent(s): dda9682

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -3
app.py CHANGED
@@ -154,8 +154,8 @@ def get_data(text):
154
  return results
155
 
156
 
157
- def send_to_img2img_def(image):
158
- return image
159
 
160
 
161
  def send_to_txt2img(image):
@@ -407,6 +407,14 @@ with gr.Blocks(css=css) as demo:
407
 
408
  hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, sampler, hf_cfg_scale, hf_width, hf_height,
409
  hf_seed], outputs=hf_image_output, concurrency_limit=64)
 
 
 
 
 
 
 
 
 
410
 
411
-
412
  demo.queue(max_size=80, api_open=False).launch(max_threads=256, show_api=False)
 
154
  return results
155
 
156
 
157
+ def send_to_img2img_def(images):
158
+ return images
159
 
160
 
161
  def send_to_txt2img(image):
 
407
 
408
  hf_text_button.click(hf_inference, inputs=[hf_prompt, hf_negative_prompt, hf_model, hf_steps, sampler, hf_cfg_scale, hf_width, hf_height,
409
  hf_seed], outputs=hf_image_output, concurrency_limit=64)
410
+ with gr.Tab("BLIP"):
411
+ with gr.Tab("Base"):
412
+ gr.load("models/Salesforce/blip-image-captioning-base", title="BLIP-base")
413
+ with gr.Tab("Large"):
414
+ gr.load("models/Salesforce/blip-image-captioning-large", title="BLIP-large")
415
+ with gr.Tab("Classification"):
416
+ gr.load("models/google/vit-base-patch16-224", title="ViT Classification")
417
+ with gr.Tab("Segmentation"):
418
+ gr.load("models/mattmdjaga/segformer_b2_clothes", title="SegFormer Segmentation")
419
 
 
420
  demo.queue(max_size=80, api_open=False).launch(max_threads=256, show_api=False)