ubermenchh committed on
Commit 48ed931
1 Parent(s): 8ff20e4

Create app.py

Files changed (1):
  app.py +21 -0
app.py ADDED
@@ -0,0 +1,21 @@
+ from transformers import Blip2ForConditionalGeneration, Blip2Processor
+ import gradio as gr
+
+ # Load the BLIP-2 (OPT-2.7B) processor and pretrained weights from the Hugging Face Hub.
+ processor = Blip2Processor.from_pretrained('Salesforce/blip2-opt-2.7b')
+ model = Blip2ForConditionalGeneration.from_pretrained('Salesforce/blip2-opt-2.7b')
+
+ def captioning(image):
+     # Preprocess the image, generate caption token ids, and decode them to text.
+     inputs = processor(images=image, return_tensors='pt')
+     generated_ids = model.generate(**inputs)
+     generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+     return image, generated_text
+
+ demo = gr.Interface(
+     captioning,
+     inputs=gr.Image(type='pil'),
+     outputs=['image', 'text'],
+ )
+
+ demo.launch()
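The original file also imported requests, PIL.Image, and torch without using them, which suggests a quick local test was intended. Below is a minimal sketch of such a check, calling captioning() directly without the Gradio UI; it assumes network access, and the sample image URL is only an illustration, not part of the commit.

import requests
from PIL import Image

# Hypothetical test image (a COCO validation photo); any RGB image works.
url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
image = Image.open(requests.get(url, stream=True).raw).convert('RGB')

# Reuse the app's captioning() function and print only the generated caption.
_, caption = captioning(image)
print(caption)

On a GPU-backed Space, loading the model with torch_dtype=torch.float16 and moving the model and inputs to 'cuda' would speed up generation considerably; the CPU path above is just the simplest working setup.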