tonyassi committed
Commit ab2efba · verified · 1 Parent(s): 471f43d

Update app.py

Files changed (1):
  app.py (+6 −5)
app.py CHANGED
@@ -6,19 +6,20 @@ from transformers import BlipProcessor, BlipForConditionalGeneration
 processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
 model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")
 
-img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
-raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')
-
 def caption(img):
     raw_image = Image.open(img).convert('RGB')
 
     inputs = processor(raw_image, return_tensors="pt")
 
-    out = model.generate(**inputs, min_length=30, max_length=1000)
+    out = model.generate(**inputs, min_length=40, max_length=200)
     return processor.decode(out[0], skip_special_tokens=True)
 
 def greet(img):
     return caption(img)
 
-iface = gr.Interface(fn=greet, inputs=gr.Image(type='filepath'), outputs="text")
+iface = gr.Interface(fn=greet,
+                     title='Blip Image Captioning Large',
+                     inputs=gr.Image(type='filepath'),
+                     outputs="text",
+                     theme = gr.themes.Base(primary_hue="teal",secondary_hue="teal",neutral_hue="slate"),)
 iface.launch()
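
For reference, a sketch of app.py as it stands after this commit. The gradio and PIL imports are assumed (the diff's hunk context shows only the transformers import); everything else is taken directly from the diff, lightly tidied.

import gradio as gr
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the BLIP captioning model and its processor once at startup.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

def caption(img):
    # img is a file path, per gr.Image(type='filepath'); normalize to RGB.
    raw_image = Image.open(img).convert('RGB')
    inputs = processor(raw_image, return_tensors="pt")
    # This commit bounds the caption at 40-200 tokens (was 30-1000).
    out = model.generate(**inputs, min_length=40, max_length=200)
    return processor.decode(out[0], skip_special_tokens=True)

def greet(img):
    return caption(img)

iface = gr.Interface(fn=greet,
                     title='Blip Image Captioning Large',
                     inputs=gr.Image(type='filepath'),
                     outputs="text",
                     theme=gr.themes.Base(primary_hue="teal", secondary_hue="teal", neutral_hue="slate"))
iface.launch()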