nielsr (HF staff) committed
Commit c1d8fa7
1 parent: cec2751

Add example

Files changed (1): app.py (+4, -1)
app.py CHANGED
@@ -2,6 +2,8 @@ import gradio as gr
 from transformers import ViltProcessor, ViltForVisualQuestionAnswering
 import torch
 
+torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
+
 processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
 model = ViltForVisualQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
 
@@ -21,4 +23,5 @@ def answer_question(image, text):
 image = gr.inputs.Image(type="pil")
 question = gr.inputs.Textbox(label="Question")
 answer = gr.outputs.Textbox(label="Predicted answer")
-gr.Interface(fn=answer_question, inputs=[image, question], outputs=answer, enable_queue=True).launch(debug=True)
+examples = [["cats.jpg"], ["How many cats are there?"]]
+gr.Interface(fn=answer_question, inputs=[image, question], outputs=answer, examples=examples, enable_queue=True).launch(debug=True)
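
For context, a minimal sketch of how the full app.py might read after this commit. The body of answer_question is not shown in the diff, so the implementation below is an assumption based on the standard dandelin/vilt-b32-finetuned-vqa usage from the transformers documentation; the remaining lines mirror the diff above.

import gradio as gr
from transformers import ViltProcessor, ViltForVisualQuestionAnswering
import torch

# Fetch a sample COCO image so the Interface examples can reference it locally.
torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
model = ViltForVisualQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

def answer_question(image, text):
    # Assumed body (not part of the diff): standard ViLT VQA inference.
    encoding = processor(image, text, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**encoding)
    idx = outputs.logits.argmax(-1).item()
    return model.config.id2label[idx]

image = gr.inputs.Image(type="pil")
question = gr.inputs.Textbox(label="Question")
answer = gr.outputs.Textbox(label="Predicted answer")
# As committed; note that for an Interface with two inputs, Gradio expects each
# example row to supply one value per input, e.g. [["cats.jpg", "How many cats are there?"]].
examples = [["cats.jpg"], ["How many cats are there?"]]
gr.Interface(fn=answer_question, inputs=[image, question], outputs=answer, examples=examples, enable_queue=True).launch(debug=True)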