# -*- coding: utf-8 -*-
"""Image Captioning with ViT+GPT2

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1P3O0gO5AUqSmM8rE9dxy2tXJ-9jkhxHz
"""

#! pip install transformers -q
#! pip install gradio -q

from PIL import Image
from transformers import VisionEncoderDecoderModel, ViTFeatureExtractor, PreTrainedTokenizerFast
import requests

# Load the fine-tuned encoder-decoder captioning model, the ViT image
# preprocessor, and the GPT2 tokenizer used to decode generated token IDs.
# (Newer transformers versions expose the preprocessor as ViTImageProcessor;
# ViTFeatureExtractor is its older name.)
model = VisionEncoderDecoderModel.from_pretrained("sachin/vit2distilgpt2")
vit_feature_extractor = ViTFeatureExtractor.from_pretrained("google/vit-base-patch16-224-in21k")
tokenizer = PreTrainedTokenizerFast.from_pretrained("distilgpt2")

# Quick test against an image fetched over HTTP:
# url = 'https://d2gp644kobdlm6.cloudfront.net/wp-content/uploads/2016/06/bigstock-Shocked-and-surprised-boy-on-t-113798588-300x212.jpg'
# with Image.open(requests.get(url, stream=True).raw) as img:
#     pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values
# generated_ids = model.generate(pixel_values.to('cpu'), num_beams=5)
# generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
# generated_sentences

# Naive text post-processing: keep only the first sentence.
# generated_sentences[0].split('.')[0]


def vit2distilgpt2(img):
    """Generate a one-sentence caption for a PIL image."""
    # Preprocess the image into the pixel tensor the ViT encoder expects.
    pixel_values = vit_feature_extractor(images=img, return_tensors="pt").pixel_values
    # Generate caption token IDs with beam search (generate() returns token
    # IDs, not encoder outputs, so name the result accordingly).
    generated_ids = model.generate(pixel_values.to('cpu'), num_beams=5)
    # Decode the token IDs back to text and keep only the first sentence.
    generated_sentences = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
    return generated_sentences[0].split('.')[0]

#!wget https://media.glamour.com/photos/5f171c4fd35176eaedb36823/master/w_2560%2Cc_limit/bike.jpg

import gradio as gr

# Note: this uses the legacy Gradio input/output API (pre-4.x gradio).
inputs = [
    gr.inputs.Image(type="pil", label="Original Image")
]

outputs = [
    gr.outputs.Textbox(label='Caption')
]

title = "Image Captioning using ViT + GPT2"
description = (
    "ViT and GPT2 are used to generate a caption for the uploaded image. "
    "The model was trained on the COCO dataset. It may have biases that we "
    "could not identify during our stress testing, so if you find any bias "
    "(gender, race, and so on), please use the `Flag` button to flag the image."
)
article = "Model Repo on Hugging Face Model Hub"
examples = [
    ["people-walking-street-pedestrian-crossing-traffic-light-city.jpeg"],
    ["elonmusk.jpeg"],
]

gr.Interface(
    vit2distilgpt2,
    inputs,
    outputs,
    title=title,
    description=description,
    article=article,
    examples=examples,
    theme="huggingface",
).launch(debug=True, enable_queue=True)
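
# --- Optional local sanity check (a minimal sketch) ---
# Uncomment to caption a single image from disk without launching the Gradio
# app (launch() above blocks until the app is closed). "bike.jpg" is a
# hypothetical local path, e.g. the image fetched by the wget command above;
# substitute any image file you have.
#
# with Image.open("bike.jpg") as img:
#     print(vit2distilgpt2(img))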