# -*- coding: utf-8 -*-
"""Gradio demo: image captioning with the nlpconnect/vit-gpt2-image-captioning model."""

from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
import torch
from PIL import Image

# Load the pretrained ViT-encoder / GPT-2-decoder captioning model together
# with its image processor and tokenizer from the Hugging Face Hub.
model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
image_processor = ViTImageProcessor.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
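
# Optionally move the model to a GPU when one is available (a minimal sketch;
# the rest of this script keeps the original CPU-only inference):
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)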


def generate_caption(img):
    """Generate a caption for a PIL image using beam search."""
    # Preprocess the image into the pixel tensor the ViT encoder expects.
    pixel_values = image_processor(images=img, return_tensors="pt").pixel_values
    # Autoregressively decode caption token ids with beam search (on CPU).
    generated_ids = model.generate(pixel_values.to("cpu"), num_beams=5)
    generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Keep only the first sentence of the decoded caption.
    return generated_sentences[0].split(".")[0]
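
# A quick sanity check outside the Gradio UI (a minimal sketch; "example.jpg"
# is a hypothetical local file, not part of this repo):
#
#     print(generate_caption(Image.open("example.jpg")))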


import gradio as gr

# gr.inputs / gr.outputs were removed in Gradio 3; use the top-level components.
inputs = [
    gr.Image(type="pil", label="Original Image")
]

outputs = [
    gr.Textbox(label="Caption")
]

title = "Vision Transformer (nlpconnect) for Image-to-Text Generation"
description = "ViT and GPT-2 generate a caption for the uploaded image. The model was trained on the COCO dataset."
article = "<a href='https://huggingface.co/nlpconnect/vit-gpt2-image-captioning'>Model Repo on Hugging Face Model Hub</a>"
examples = [
    ["Img_1.jpg"],
    ["Img_2.jpg"],
    ["img_2t.jpg"],
    ["img_t2.jpg"],
    ["img4_t.jpg"]
]



demo = gr.Interface(
    generate_caption,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
)

# queue() replaces the deprecated enable_queue=True launch argument; the old
# theme="huggingface" string is no longer a valid Gradio theme, so it is dropped.
demo.queue().launch(debug=True)