Spaces:
Runtime error
Create app2.py
app2.py
ADDED
@@ -0,0 +1,50 @@
+import torch
+import gradio as gr
+import re
+from transformers import AutoTokenizer, ViTFeatureExtractor, VisionEncoderDecoderModel
+
+device = 'cpu'
+encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
+feature_extractor = ViTFeatureExtractor.from_pretrained(encoder_checkpoint)
+tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
+model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
+
+def predict(image, max_length=64, num_beams=4):
+    image = image.convert('RGB')
+    image = feature_extractor(image, return_tensors="pt").pixel_values.to(device)
+    clean_text = lambda x: x.replace('<|endoftext|>', '').split('\n')[0]
+    caption_ids = model.generate(image, max_length=max_length, num_beams=num_beams)[0]
+    caption_text = clean_text(tokenizer.decode(caption_ids))
+    return caption_text
+
+def set_example_image(example: list) -> dict:
+    return gr.Image.update(value=example[0])
+css = '''
+h1#title {
+  text-align: center;
+}
+h3#header {
+  text-align: center;
+}
+img#overview {
+  max-width: 800px;
+  max-height: 600px;
+}
+img#style-image {
+  max-width: 1000px;
+  max-height: 600px;
+}
+'''
+demo = gr.Blocks(css=css)
+with demo:
+    gr.Markdown('''<h1 id="title">Image Caption 🖼️</h1>''')
+    gr.Markdown('''Made by: Shreyas Dixit''')
+    with gr.Column():
+        input = gr.inputs.Image(label="Upload your Image", type='pil', optional=True)
+        output = gr.outputs.Textbox(type="auto", label="Captions")
+        btn = gr.Button("Generate Caption")
+        btn.click(fn=predict, inputs=input, outputs=output)
+
+demo.launch()
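
The interface above targets the pre-4.x Gradio API: gr.inputs.Image, gr.outputs.Textbox, and gr.Image.update were removed in Gradio 4, so on a Space that resolves to a recent Gradio release the script fails at startup, which would be consistent with the Runtime error status shown for this Space. Below is a minimal sketch of the same layout against the current component API; it assumes Gradio 4.x and reuses predict and css from app2.py unchanged, and the legacy optional=True flag is simply dropped because it has no direct equivalent.

import gradio as gr

# Same Blocks layout as app2.py, rewritten for the Gradio 4.x component API.
# Assumes `predict` and `css` are defined as in app2.py above.
with gr.Blocks(css=css) as demo:
    gr.Markdown('<h1 id="title">Image Caption 🖼️</h1>')
    gr.Markdown('Made by: Shreyas Dixit')
    with gr.Column():
        image_input = gr.Image(label="Upload your Image", type="pil")  # replaces gr.inputs.Image(...)
        caption_output = gr.Textbox(label="Captions")                  # replaces gr.outputs.Textbox(...)
        btn = gr.Button("Generate Caption")
        btn.click(fn=predict, inputs=image_input, outputs=caption_output)

demo.launch()

The unused set_example_image helper would change in the same way, since per-component .update methods were replaced by gr.update(value=...).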
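
Independently of the front end, the captioning pipeline can be smoke-tested by calling predict directly on a PIL image. A small sketch, assuming the model-loading block and predict from app2.py have already been executed (for example, with the UI section commented out); "sample.jpg" is a placeholder path, not a file from this repository.

from PIL import Image

# Placeholder path: substitute any local photo.
img = Image.open("sample.jpg")

# Runs the ViT-GPT2 captioner on CPU and prints a single beam-search caption.
print(predict(img, max_length=64, num_beams=4))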