gxy committed on
Commit 57f0afd
1 Parent(s): 4fee4fe

FIRST: add app.py

Files changed (1)
  1. app.py +62 -0
app.py ADDED
@@ -0,0 +1,62 @@
+ import gradio as gr
+ from PIL import Image
+ import requests  # imported in the original commit but not used by the demo
+ import torch
+ from transformers import BlipForConditionalGeneration, BlipProcessor, GenerationConfig
+
+ device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+ _MODEL_PATH = 'IDEA-CCNL/Taiyi-BLIP-750M-Chinese'
+
+ # Load the Chinese BLIP captioning model in fp16 for faster GPU inference.
+ processor = BlipProcessor.from_pretrained(_MODEL_PATH)
+ model = BlipForConditionalGeneration.from_pretrained(_MODEL_PATH).half().eval().to(device)
+
+
+ def inference(raw_image, model_n, question, strategy):
+     # `question` is unused: the demo only exposes the captioning task.
+     if model_n == 'Image Captioning':
+         # Preprocess the PIL image into fp16 pixel values on the target device.
+         image = processor(images=raw_image, return_tensors="pt").to(device, torch.float16)
+         with torch.no_grad():
+             if strategy == "Beam search":
+                 # Deterministic decoding: keep the 3 highest-scoring beams.
+                 config = GenerationConfig(
+                     do_sample=False,
+                     num_beams=3,
+                     max_length=20,
+                     min_length=5,
+                 )
+             else:
+                 # Nucleus sampling: sample from the top 90% of probability mass.
+                 config = GenerationConfig(
+                     do_sample=True,
+                     top_p=0.9,
+                     max_length=20,
+                     min_length=5,
+                 )
+             captions = model.generate(**image, generation_config=config)
+         caption = processor.decode(captions[0], skip_special_tokens=True)
+         # The tokenizer separates Chinese characters with spaces; strip them.
+         caption = caption.replace(' ', '')
+         return 'caption: ' + caption
+
+
+ # Older (pre-4.x) Gradio input/output API, as used throughout this Space.
+ inputs = [
+     gr.inputs.Image(type='pil'),
+     gr.inputs.Radio(choices=['Image Captioning'], type="value", default="Image Captioning", label="Task"),
+     gr.inputs.Textbox(lines=2, label="Question"),
+     gr.inputs.Radio(choices=['Beam search', 'Nucleus sampling'], type="value", default="Nucleus sampling", label="Caption Decoding Strategy"),
+ ]
+ outputs = gr.outputs.Textbox(label="Output")
+
+ title = "BLIP"
+
+ description = "Gradio demo for BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation (Salesforce Research). To use it, simply upload your image, or click one of the examples to load them. Read more at the links below."
+
+ article = "<p style='text-align: center'><a href='https://github.com/IDEA-CCNL/Fengshenbang-LM' target='_blank'>Github Repo</a></p>"
+
+
+ gr.Interface(inference, inputs, outputs, title=title, description=description, article=article, examples=[
+     ['starrynight.jpeg', "Image Captioning", "None", "Nucleus sampling"]]).launch(enable_queue=True)
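
For a quick sanity check outside the Gradio UI, the captioning path can be exercised directly. The sketch below is not part of the commit: it assumes a CUDA device (the demo casts the model to fp16, which is impractical on CPU) and a local copy of starrynight.jpeg, the example image the demo wires into its Interface.

# sanity_check.py -- standalone sketch, not part of this commit.
import torch
from PIL import Image
from transformers import BlipForConditionalGeneration, BlipProcessor, GenerationConfig

_MODEL_PATH = 'IDEA-CCNL/Taiyi-BLIP-750M-Chinese'
device = torch.device('cuda')  # assumption: fp16 inference effectively requires a GPU

processor = BlipProcessor.from_pretrained(_MODEL_PATH)
model = BlipForConditionalGeneration.from_pretrained(_MODEL_PATH).half().eval().to(device)

# 'starrynight.jpeg' is the example image referenced by the demo.
raw_image = Image.open('starrynight.jpeg').convert('RGB')
batch = processor(images=raw_image, return_tensors='pt').to(device, torch.float16)

# Nucleus sampling, mirroring the demo's default decoding strategy.
config = GenerationConfig(do_sample=True, top_p=0.9, max_length=20, min_length=5)
with torch.no_grad():
    out = model.generate(**batch, generation_config=config)

# Strip the spaces the tokenizer inserts between Chinese characters.
print(processor.decode(out[0], skip_special_tokens=True).replace(' ', ''))

Swapping the config for do_sample=False, num_beams=3 reproduces the demo's beam-search branch.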