from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, PreTrainedTokenizerFast

# Pretrained ViT encoder + GPT-2 decoder image-captioning checkpoint.
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
# Feature extractor that resizes and normalizes images into ViT pixel values.
vit_feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
# distilgpt2 shares GPT-2's vocabulary, so its tokenizer can decode the generated ids.
tokenizer = PreTrainedTokenizerFast.from_pretrained("distilgpt2")

def vit2distilgpt2(img):
    # Preprocess the PIL image into a (1, 3, 224, 224) pixel-value tensor.
    pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values
    # Beam search with 5 beams; keep the 3 highest-scoring candidate captions.
    generated_ids = model.generate(pixel_values.to("cpu"), num_beams=5, num_return_sequences=3)
    generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)

    return generated_sentences
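
# Quick sanity check (a sketch; assumes Image1.png, one of the example images
# listed below, sits on disk next to this script):
#   from PIL import Image
#   print(vit2distilgpt2(Image.open("Image1.png")))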

import gradio as gr

# Legacy Gradio component API (gr.inputs / gr.outputs, removed in Gradio 4.x).
inputs = [
    gr.inputs.Image(type="pil", label="Original Image")
]

outputs = [
    gr.outputs.Textbox(label="Caption 1"),
    gr.outputs.Textbox(label="Caption 2"),
    gr.outputs.Textbox(label="Caption 3")
]

title = "Image Captioning using ViT + GPT2"
description = "ViT and GPT2 are used to generate a caption for the uploaded image. The model was trained on the COCO dataset."
examples = [
    ["Image1.png"],
    ["Image2.png"],
    ["Image3.png"]
]

# Assemble and launch the demo; enable_queue keeps long beam-search
# generations from hitting request timeouts.
gr.Interface(
    vit2distilgpt2,
    inputs,
    outputs,
    title=title,
    description=description,
    examples=examples,
    theme="huggingface",
).launch(debug=True, enable_queue=True)
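
# A minimal way to try this locally (assumes this file is saved as app.py and
# the example images are present): pip install transformers gradio torch,
# then run `python app.py` and open the printed local URL.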