prithivMLmods committed
Commit fd1d0d3
1 Parent(s): 3799232

Upload 2 files

Files changed (2)
  1. app.py +36 -0
  2. requirements.txt +2 -0
app.py ADDED
@@ -0,0 +1,36 @@
+import gradio as gr
+from PIL import Image
+from transformers import BlipProcessor, BlipForConditionalGeneration
+import time
+
+# Load the BLIP processor and captioning model once at startup.
+processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
+model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
+
+def caption(img, min_len, max_len):
+    # Gradio passes the image as a file path; normalize it to RGB.
+    raw_image = Image.open(img).convert('RGB')
+    inputs = processor(raw_image, return_tensors="pt")
+    # Slider values may be floats; cast the length bounds to int for generate().
+    out = model.generate(**inputs, min_length=int(min_len), max_length=int(max_len))
+    return processor.decode(out[0], skip_special_tokens=True)
+
+def greet(img, min_len, max_len):
+    # Caption the image and append the elapsed inference time to the output.
+    start = time.time()
+    result = caption(img, min_len, max_len)
+    end = time.time()
+    total_time = str(end - start)
+    return result + '\n' + total_time + ' seconds'
+
+iface = gr.Interface(
+    fn=greet,
+    title='Blip Image Captioning Large',
+    description="[Salesforce/blip-image-captioning-large](https://huggingface.co/Salesforce/blip-image-captioning-large)",
+    inputs=[gr.Image(type='filepath', label='Image'),
+            gr.Slider(label='Minimum Length', minimum=1, maximum=1000, value=30),
+            gr.Slider(label='Maximum Length', minimum=1, maximum=1000, value=100)],
+    outputs=gr.Textbox(label='Caption'),
+    theme=gr.themes.Base(primary_hue="teal", secondary_hue="teal", neutral_hue="slate"),
+)
+iface.launch()
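Since the model and processor are plain `transformers` objects, the captioning path can be smoke-tested without launching the Gradio UI. A minimal sketch, assuming a local test image at `test.jpg` (a hypothetical path) and the same generation bounds as the slider defaults:

```python
# Standalone check of the BLIP captioning path (no Gradio involved).
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

# "test.jpg" is a placeholder; any local image PIL can open works here.
image = Image.open("test.jpg").convert("RGB")
inputs = processor(image, return_tensors="pt")
out = model.generate(**inputs, min_length=30, max_length=100)
print(processor.decode(out[0], skip_special_tokens=True))
```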
requirements.txt ADDED
@@ -0,0 +1,2 @@
+transformers
+torch
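Note that app.py also imports `gradio` and `PIL`, which are not listed here; on a Hugging Face Space built with the Gradio SDK those are typically preinstalled (gradio itself depends on Pillow), so listing only `transformers` and `torch` works there. A sketch of a more explicit requirements.txt for running the app outside Spaces, assuming unpinned latest releases are compatible:

```
gradio
transformers
torch
Pillow
```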