doevent committed on
Commit
d10a203
1 Parent(s): c1b284c

Update app.py

Files changed (1)
  1. app.py +15 -10
app.py CHANGED
@@ -56,7 +56,10 @@ def inference(raw_image, model_n, question, strategy):
         answer = model_vq(image_vq, question, train=False, inference='generate')
         return 'answer: '+answer[0]
 
-inputs = [gr.Image(type='pil'),gr.Radio(choices=['Image Captioning',"Visual Question Answering"], type="value", default="Image Captioning", label="Task"),gr.Textbox(lines=2, label="Question"),gr.Radio(choices=['Beam search','Nucleus sampling'], type="value", default="Nucleus sampling", label="Caption Decoding Strategy")]
+inputs = [gr.Image(type='pil'),
+          gr.Radio(choices=['Image Captioning',"Visual Question Answering"], type="value", value="Image Captioning", label="Task"),
+          gr.Textbox(lines=2, label="Question"),
+          gr.Radio(choices=['Beam search','Nucleus sampling'], type="value", value="Nucleus sampling", label="Caption Decoding Strategy")]
 outputs = gr.Textbox(label="Output")
 
 title = "BLIP"
@@ -66,12 +69,14 @@ description = "Gradio demo for BLIP: Bootstrapping Language-Image Pre-training f
 article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.12086' target='_blank'>BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation</a> | <a href='https://github.com/salesforce/BLIP' target='_blank'>Github Repo</a></p>"
 
 
-gr.Interface(inference,
-             inputs,
-             outputs,
-             title=title,
-             description=description,
-             article=article,
-             examples=[['starrynight.jpeg',"Image Captioning","None","Nucleus sampling"]],
-             allow_flagging='never',
-             cache_examples=False).queue(concurrency_count=1).launch(show_error=True)
+demo = gr.Interface(inference,
+                    inputs,
+                    outputs,
+                    title=title,
+                    description=description,
+                    article=article,
+                    examples=[['starrynight.jpeg',"Image Captioning","None","Nucleus sampling"]],
+                    allow_flagging='never',
+                    cache_examples="lazy",
+                    delete_cache=(4000, 4000))
+demo.queue(default_concurrency_limit=2).launch(show_error=True)
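In short, the commit migrates the Space to current Gradio keyword arguments: default= on the Radio components becomes value=, the gr.Interface(...) result is kept in a variable, cache_examples changes from False to "lazy", delete_cache=(4000, 4000) is added, and queue(concurrency_count=1) becomes queue(default_concurrency_limit=2). The snippet below is a minimal standalone sketch of the new wiring, not the Space's actual code: the inference body is a placeholder stub (the real app runs the BLIP captioning and VQA models), and the example row assumes starrynight.jpeg from the Space repo is present in the working directory.

import gradio as gr

def inference(raw_image, model_n, question, strategy):
    # Placeholder stub: the actual Space runs BLIP captioning / VQA here.
    if model_n == "Visual Question Answering":
        return 'answer: (stub reply to) ' + question
    return 'caption: (stub, decoding with ' + strategy + ')'

inputs = [gr.Image(type='pil'),
          # value= replaces the removed default= keyword
          gr.Radio(choices=['Image Captioning', "Visual Question Answering"],
                   type="value", value="Image Captioning", label="Task"),
          gr.Textbox(lines=2, label="Question"),
          gr.Radio(choices=['Beam search', 'Nucleus sampling'],
                   type="value", value="Nucleus sampling",
                   label="Caption Decoding Strategy")]
outputs = gr.Textbox(label="Output")

demo = gr.Interface(inference,
                    inputs,
                    outputs,
                    title="BLIP",
                    # assumes starrynight.jpeg (shipped with the Space) is in the working directory
                    examples=[['starrynight.jpeg', "Image Captioning", "None", "Nucleus sampling"]],
                    allow_flagging='never',
                    # "lazy": examples are cached after their first use instead of at startup
                    cache_examples="lazy",
                    # every 4000 s, delete temporary files older than 4000 s
                    delete_cache=(4000, 4000))

# default_concurrency_limit= replaces the old concurrency_count= queue argument
demo.queue(default_concurrency_limit=2).launch(show_error=True)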