Dendup committed
Commit 0aca14b
1 Parent(s): bc8b899

Update app.py

Files changed (1)
  1. app.py +32 -0
app.py CHANGED
@@ -0,0 +1,32 @@
+ import streamlit as st
+ from PIL import Image
+ import requests
+ from transformers import BlipProcessor, BlipForConditionalGeneration
+
+ # Load the BLIP model
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+ model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+ # Streamlit app
+ st.title("Image Captioning with BLIP")
+
+ # Uploading the image
+ uploaded_image = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
+ if uploaded_image is not None:
+     image = Image.open(uploaded_image).convert('RGB')
+     st.image(image, caption='Uploaded Image', use_column_width=True)
+
+     # Perform conditional or unconditional image captioning
+     captioning_mode = st.selectbox("Captioning Mode", ["Conditional", "Unconditional"])
+     if captioning_mode == "Conditional":
+         text = st.text_input("Provide a condition for the captioning (e.g., 'a photo of', 'an illustration of'): ", "a photo of")
+         if text:  # Only proceed if the user has provided a text
+             inputs = processor(image, text, return_tensors="pt")
+             out = model.generate(**inputs)
+             caption = processor.decode(out[0], skip_special_tokens=True)
+             st.write(f"Generated Caption: {caption}")
+     else:  # Unconditional captioning
+         inputs = processor(image, return_tensors="pt")
+         out = model.generate(**inputs)
+         caption = processor.decode(out[0], skip_special_tokens=True)
+         st.write(f"Generated Caption: {caption}")
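
A side note on the unused import: the diff pulls in requests but never calls it. It is handy for smoke-testing the same BLIP calls outside Streamlit; a minimal sketch, assuming network access and a placeholder image URL (not from this commit):

import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

# Fetch a test image over HTTP; the URL below is a placeholder.
url = "https://example.com/photo.jpg"
image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

# Conditional captioning, mirroring the app's "Conditional" branch
inputs = processor(image, "a photo of", return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))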
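
One refinement this commit does not make: Streamlit reruns the whole script on every widget interaction, so the module-level from_pretrained calls above reload the BLIP weights each time. A minimal sketch of caching the load with st.cache_resource (available in Streamlit 1.18+; the load_blip helper name is ours, not the app's):

import streamlit as st
from transformers import BlipProcessor, BlipForConditionalGeneration

@st.cache_resource  # cache across reruns: weights load once per process
def load_blip(model_id="Salesforce/blip-image-captioning-large"):
    processor = BlipProcessor.from_pretrained(model_id)
    model = BlipForConditionalGeneration.from_pretrained(model_id)
    return processor, model

processor, model = load_blip()

With this in place, changing the selectbox or text input no longer pays the multi-gigabyte model load on every rerun.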