AjithBharadwaj committed
Commit b1f99b2 · 1 Parent(s): f10f7c6

Create app.py

Files changed (1)
  1. app.py +32 -0
app.py ADDED
@@ -0,0 +1,32 @@
+ # Image-captioning demo: a Gradio UI around the BLIP image-to-text pipeline.
+ from transformers import pipeline
+ import gradio as gr
+ import io
+ import base64
+
+ # Load the BLIP base captioning model through the transformers pipeline.
+ get_completion = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+
+ def image_to_base64_str(pil_image):
+     # Serialize a PIL image to a base64-encoded PNG string.
+     byte_arr = io.BytesIO()
+     pil_image.save(byte_arr, format='PNG')
+     byte_arr = byte_arr.getvalue()
+     return str(base64.b64encode(byte_arr).decode('utf-8'))
+
+ def captioner(image):
+     # transformers decodes the base64 string back into an image before captioning.
+     base64_image = image_to_base64_str(image)
+     result = get_completion(base64_image)
+     return result[0]['generated_text']
+
+ gr.close_all()
+ demo = gr.Interface(fn=captioner,
+                     inputs=[gr.Image(label="Upload image", type="pil")],
+                     outputs=[gr.Textbox(label="Caption")],
+                     title="Image Captioning with BLIP",
+                     description="Caption any image using the BLIP model",
+                     allow_flagging="never",
+                     examples=["christmas_dog.jpeg", "bird_flight.jpeg", "cow.jpeg"])
+
+ demo.launch(inline=False)
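
For reference, a minimal sketch of exercising the same BLIP pipeline outside the Gradio UI, assuming the example images listed in `examples` (e.g. christmas_dog.jpeg) sit next to app.py; `captioner_pipe` is just an illustrative name, not part of the commit:

# Quick local check of the same image-to-text pipeline, no Gradio involved.
from PIL import Image
from transformers import pipeline

captioner_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
image = Image.open("christmas_dog.jpeg")  # assumed to exist in the Space's root
print(captioner_pipe(image)[0]["generated_text"])

The pipeline accepts a PIL image (or a file path/URL) directly, which is why the base64 round-trip in captioner() is optional rather than required.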