dafajudin committed on
Commit
5e5ca2a
1 Parent(s): f9c1453

connect to model

Browse files
Files changed (1) hide show
  1. app.py +22 -4
app.py CHANGED
@@ -1,7 +1,25 @@
1
  import gradio as gr
 
2
 
3
- def greet(name):
4
- return "Hello " + name + "!!"
5
 
6
- demo = gr.Interface(fn=greet, inputs="text", outputs="text")
7
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
from transformers import pipeline

# Visual-question-answering pipeline backed by the BLIP MedVQA checkpoint.
# Loaded once at module import so every request reuses the same model.
generator = pipeline(
    "visual-question-answering",
    model="jihadzakki/blip1-medvqa",
)
 
5
 
6
def format_answer(image, question):
    """Answer a question about an image and format the result for display.

    Parameters:
        image: PIL.Image from the Gradio Image component (type="pil").
        question: str, the user's question about the image.

    Returns:
        str in the form "Predicted Answer: <answer>".
    """
    result = generator(image, question)
    # The VQA pipeline returns a list of {'answer': ..., ...} dicts ranked
    # by score; guard against an empty list so we never raise IndexError.
    # (Removed a leftover debug print of the raw pipeline output.)
    if not result:
        return "Predicted Answer: No answer found"
    predicted_answer = result[0].get('answer', 'No answer found')
    return f"Predicted Answer: {predicted_answer}"
12
+
13
# Assemble the Gradio UI: an image plus a free-text question go in,
# a formatted answer string comes out.
_vqa_inputs = [
    gr.Image(label="Upload image", type="pil"),
    gr.Textbox(label="Question"),
]
_vqa_outputs = [gr.Textbox(label="Answer")]

VisualQAApp = gr.Interface(
    fn=format_answer,
    inputs=_vqa_inputs,
    outputs=_vqa_outputs,
    title="Visual Question Answering using BLIP Model",
    description="VQA",
    allow_flagging="never",
)

# share=True also publishes a temporary public URL for the app.
VisualQAApp.launch(share=True)