m7mdal7aj committed on
Commit
bc7d231
1 Parent(s): c7925f2
Files changed (1)
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
+ import streamlit as st
+ import torch
+ from PIL import Image
+ from transformers import Blip2Processor, Blip2ForConditionalGeneration
+
+
+ def load_model():
+     # Load the BLIP-2 processor and the 8-bit quantized OPT-2.7B model onto the GPU
+     processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
+     model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", load_in_8bit=True, torch_dtype=torch.float16, device_map="auto")
+
+     return model, processor
+
+
+
+ def answer_question(image, question, model, processor):
+     # Read the uploaded file and convert it to a 3-channel RGB image
+     image = Image.open(image).convert('RGB')
+
+     # Preprocess the image/question pair and move the tensors to the GPU
+     inputs = processor(image, question, return_tensors="pt").to("cuda", torch.float16)
+
+     out = model.generate(**inputs, max_length=200, min_length=20, num_beams=1)
+
+     answer = processor.decode(out[0], skip_special_tokens=True).strip()
+     return answer
+
+ st.title("Image Question Answering")
+
+ # File uploader for the image
+ image = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
+
+ # Text input for the question
+ question = st.text_input("Enter your question about the image:")
+
+ if st.button("Get Answer"):
+     if image is not None and question:
+         # Display the image
+         st.image(image, use_column_width=True)
+         # Get and display the answer
+         model, processor = load_model()
+         answer = answer_question(image, question, model, processor)
+         st.write(answer)
+     else:
+         st.write("Please upload an image and enter a question.")
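
A possible follow-up, not part of this commit: load_model() runs on every press of "Get Answer", so the 2.7B-parameter checkpoint is reloaded for each question. A minimal sketch of how the load could be cached across Streamlit reruns with the st.cache_resource decorator (same load_model body as in the diff above; the decorator assumes Streamlit >= 1.18):

import streamlit as st
import torch
from transformers import Blip2Processor, Blip2ForConditionalGeneration

@st.cache_resource  # keep one model/processor pair alive for the whole session
def load_model():
    processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
    model = Blip2ForConditionalGeneration.from_pretrained(
        "Salesforce/blip2-opt-2.7b",
        load_in_8bit=True,
        torch_dtype=torch.float16,
        device_map="auto",
    )
    return model, processor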