from PIL import Image
import gradio as gr
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, PreTrainedTokenizerFast

# Load the ViT-encoder / DistilGPT-2-decoder captioning model and its preprocessing components.
model = VisionEncoderDecoderModel.from_pretrained("Zayn/vit2distilgpt2")
vit_feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
tokenizer = PreTrainedTokenizerFast.from_pretrained("distilgpt2")


def vit2distilgpt2(img):
    # Convert the PIL image into the pixel tensor the ViT encoder expects.
    pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values
    # Generate caption token IDs with beam search (these are decoder outputs, not encoder outputs).
    generated_ids = model.generate(pixel_values.to("cpu"), num_beams=5)
    generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    # Return only the first sentence of the decoded caption.
    return generated_sentences[0].split(".")[0]


inputs = [
    gr.inputs.Image(type="pil", label="Original Image"),
]
outputs = [
    gr.outputs.Textbox(label="Caption"),
]

title = "Image Captioning using Vision Transformer and GPT-2"
description = "Developed by Zayn"
article = "<a href='https://huggingface.co/Zayn/vit2distilgpt2'>Hugging Face AI Community</a>"
examples = [["car.jpg"]]

gr.Interface(
    vit2distilgpt2,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
    theme="huggingface",
).launch(debug=True, enable_queue=True)
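
# A minimal sketch for smoke-testing the captioning function without the Gradio UI.
# It assumes the example image "car.jpg" (referenced in `examples` above) is present
# in the working directory. It is left commented out because launch(debug=True)
# blocks, so code placed after it would only run once the interface is closed.
#
# if __name__ == "__main__":
#     test_img = Image.open("car.jpg")
#     print(vit2distilgpt2(test_img))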