Dongxu Li committed
Commit d2f0b33
1 Parent(s): ee0e33d

disable image uploading.

Files changed (1): app.py (+12 -7)
app.py CHANGED
@@ -6,10 +6,6 @@ from torchvision.transforms.functional import InterpolationMode
 
 device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
 
-
-
-
-
 import gradio as gr
 
 from models.blip import blip_decoder
@@ -61,14 +57,23 @@ def inference(raw_image, model_n, question, strategy):
     answer = model_vq(image_vq, question, train=False, inference='generate')
     return 'answer: '+answer[0]
 
-inputs = [gr.inputs.Image(type='pil'),gr.inputs.Radio(choices=['Image Captioning',"Visual Question Answering"], type="value", default="Image Captioning", label="Task"),gr.inputs.Textbox(lines=2, label="Question"),gr.inputs.Radio(choices=['Beam search','Nucleus sampling'], type="value", default="Nucleus sampling", label="Caption Decoding Strategy")]
+inputs = [
+    gr.Image(type='pil', interactive=False),
+    gr.inputs.Radio(choices=['Image Captioning',"Visual Question Answering"],
+                    type="value",
+                    default="Image Captioning",
+                    label="Task"
+    ),gr.inputs.Textbox(lines=2, label="Question"),gr.inputs.Radio(choices=['Beam search','Nucleus sampling'], type="value", default="Nucleus sampling", label="Caption Decoding Strategy")]
 outputs = gr.outputs.Textbox(label="Output")
 
 title = "BLIP"
 
 description = "Gradio demo for BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation (Salesforce Research). To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
 
-article = "<p style='text-align: center'><a href='https://arxiv.org/abs/2201.12086' target='_blank'>BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation</a> | <a href='https://github.com/salesforce/BLIP' target='_blank'>Github Repo</a></p>"
-
+article = """
+<p style='text-align: center'><a href='https://arxiv.org/abs/2201.12086' target='_blank'>BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation</a> | <a href='https://github.com/salesforce/BLIP' target='_blank'>Github Repo</a></p>
+<p><strong>We have now disable image uploading as of March 23. 2023. </strong>
+<p><strong>For example usage, see notebooks https://github.com/salesforce/LAVIS/tree/main/examples.</strong>
+"""
 
 gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=[['starrynight.jpeg',"Image Captioning","None","Nucleus sampling"]]).launch(enable_queue=True)
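
For context, the following is a minimal, self-contained sketch of how the input wiring behaves after this change, assuming a Gradio 3.x-style API and stubbing out the BLIP models; the caption_or_answer stub and the omission of the example gallery are placeholders for illustration, not part of the commit. The key point is interactive=False on the image component, which keeps the image display in the UI while preventing user uploads.

    # Minimal sketch, not the actual app.py: the BLIP models are replaced by a stub
    # so the snippet runs on its own (requires only gradio).
    import gradio as gr

    def caption_or_answer(image, task, question, strategy):
        # Placeholder for the real BLIP inference call.
        return f"{task} requested with strategy '{strategy}' (model output would go here)"

    demo = gr.Interface(
        fn=caption_or_answer,
        inputs=[
            # interactive=False disables uploading/editing of the image component.
            gr.Image(type="pil", interactive=False),
            gr.Radio(["Image Captioning", "Visual Question Answering"], label="Task"),
            gr.Textbox(lines=2, label="Question"),
            gr.Radio(["Beam search", "Nucleus sampling"], label="Caption Decoding Strategy"),
        ],
        outputs=gr.Textbox(label="Output"),
        title="BLIP",
    )

    if __name__ == "__main__":
        demo.launch()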