captain-awesome committed
Commit 5a8da99 · verified · Parent: 82bf579

Update app.py

Files changed (1): app.py (+38, -10)
app.py CHANGED
@@ -1,20 +1,48 @@
-import gradio as gr
+# import gradio as gr
+# # from PIL import Image
+# from transformers.utils import logging
+# from transformers import BlipForConditionalGeneration, AutoProcessor
+
+# logging.set_verbosity_error()
+
+# model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
+# processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+
+# def caption_image(image):
+#     inputs = processor(image, return_tensors="pt")
+#     out = model.generate(**inputs)
+#     caption = processor.decode(out[0], skip_special_tokens=True)
+#     return caption
+
+
+
+# gr.Interface(caption_image, gr.inputs.Image(), "text").launch()
+# # gr.Interface(caption_image, image_input, caption_output).launch()
+
+
+
+
+import streamlit as st
 # from PIL import Image
 from transformers.utils import logging
 from transformers import BlipForConditionalGeneration, AutoProcessor
+import torch
 
 logging.set_verbosity_error()
 
-model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
-processor = AutoProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+model = BlipForConditionalGeneration.from_pretrained("./models/Salesforce/blip-image-captioning-base")
+processor = AutoProcessor.from_pretrained("./models/Salesforce/blip-image-captioning-base")
+
+st.title("Image Captioning")
+
+uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
 
-def caption_image(image):
+if uploaded_file is not None:
+    image = Image.open(uploaded_file)
+    st.image(image, caption="Uploaded Image", use_column_width=True)
+    st.write("")
+    st.write("Generating caption...")
     inputs = processor(image, return_tensors="pt")
     out = model.generate(**inputs)
     caption = processor.decode(out[0], skip_special_tokens=True)
-    return caption
-
-
-
-gr.Interface(caption_image, gr.inputs.Image(), "text").launch()
-# gr.Interface(caption_image, image_input, caption_output).launch()
+    st.write("Caption:", caption)
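
A note on the committed code: the Streamlit branch keeps the PIL import commented out (# from PIL import Image) but still calls Image.open(uploaded_file), so the first upload raises NameError: name 'Image' is not defined. Restoring the import fixes it; chaining .convert("RGB") is an extra defensive step that is not in the commit, since PNG uploads can decode as RGBA or palette images while the BLIP processor expects 3-channel RGB:

from PIL import Image  # Image.open() below depends on this name

if uploaded_file is not None:
    image = Image.open(uploaded_file).convert("RGB")  # normalize to 3-channel RGB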
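The from_pretrained calls now point at a local ./models/Salesforce/blip-image-captioning-base directory instead of the Hub ID, so the checkpoint has to be on disk before the app starts. A minimal sketch of one way to populate that path, assuming huggingface_hub is installed (the download step is not part of this commit):

from huggingface_hub import snapshot_download

# Fetch the BLIP checkpoint into the directory app.py loads from.
snapshot_download(
    repo_id="Salesforce/blip-image-captioning-base",
    local_dir="./models/Salesforce/blip-image-captioning-base",
)

With the files in place, the app launches with: streamlit run app.py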
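Streamlit re-executes the whole script on every widget interaction, so both from_pretrained calls run again on each upload. A common refinement, not used in this commit, is to cache the load with st.cache_resource; load_blip below is a hypothetical helper name:

import streamlit as st
from transformers import BlipForConditionalGeneration, AutoProcessor

MODEL_DIR = "./models/Salesforce/blip-image-captioning-base"

@st.cache_resource  # run once per process; later reruns reuse the cached objects
def load_blip():
    model = BlipForConditionalGeneration.from_pretrained(MODEL_DIR)
    processor = AutoProcessor.from_pretrained(MODEL_DIR)
    return model, processor

model, processor = load_blip()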