|
import torch
import gradio as gr
from transformers import AutoTokenizer, VisionEncoderDecoderModel, ViTImageProcessor

device = 'cpu'

# The same ViT-GPT2 captioning checkpoint provides the image processor (encoder side),
# the tokenizer (decoder side), and the full encoder-decoder model.
encoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
decoder_checkpoint = "nlpconnect/vit-gpt2-image-captioning"
model_checkpoint = "nlpconnect/vit-gpt2-image-captioning"

feature_extractor = ViTImageProcessor.from_pretrained(encoder_checkpoint)
tokenizer = AutoTokenizer.from_pretrained(decoder_checkpoint)
model = VisionEncoderDecoderModel.from_pretrained(model_checkpoint).to(device)
|
|
def predict(image, max_length=128, num_beams=3):
    # Ensure three channels, then turn the PIL image into the pixel tensor the ViT encoder expects.
    image = image.convert('RGB')
    pixel_values = feature_extractor(images=image, return_tensors="pt").pixel_values.to(device)
    # Generate caption token ids with beam search, then decode them back to text.
    caption_ids = model.generate(pixel_values, max_length=max_length, num_beams=num_beams)[0]
    caption_text = tokenizer.decode(caption_ids, skip_special_tokens=True)
    return caption_text
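# Quick sanity check outside the Gradio UI, reusing one of the example images the
# Interface below expects to find next to this script:
#
#   from PIL import Image
#   print(predict(Image.open("example1.jpeg")))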
|
|
|
# Gradio interface: a PIL image in, the generated caption text out.
image_input = gr.Image(label="Upload any Image", type='pil')
caption_output = gr.Textbox(type="text", label="Captions")
# Example images example1.jpeg and example2.jpeg are expected next to this script.
examples = [f"example{i}.jpeg" for i in range(1, 3)]

title = "Image Captioning"
interface = gr.Interface(
    fn=predict,
    inputs=image_input,
    outputs=caption_output,
    examples=examples,
    title=title,
)
interface.launch(debug=True)
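# debug=True surfaces errors in the console while the app runs; by default
# Gradio serves the interface locally at http://127.0.0.1:7860.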
|
|