import gradio as gr
import torch
from transformers import AutoProcessor, AutoModelForCausalLM

# Download example images for the demo.
torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
torch.hub.download_url_to_file('https://huggingface.co/datasets/nielsr/textcaps-sample/resolve/main/stop_sign.png', 'stop_sign.png')
torch.hub.download_url_to_file('https://cdn.openai.com/dall-e-2/demos/text2im/astronaut/horse/photo/0.jpg', 'astronaut.jpg')

# Load the GIT-Large checkpoint fine-tuned on COCO captions.
git_processor = AutoProcessor.from_pretrained("microsoft/git-large-coco")
git_model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-coco")

# Run on GPU when available.
device = "cuda" if torch.cuda.is_available() else "cpu"
git_model.to(device)


def generate_caption(processor, model, image):
    # Preprocess the image, generate caption token ids, and decode them to text.
    inputs = processor(images=image, return_tensors="pt").to(device)
    generated_ids = model.generate(pixel_values=inputs.pixel_values, max_length=50)
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_caption


def generate_captions(image):
    return generate_caption(git_processor, git_model, image)
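# Optional sanity check (a minimal added sketch, not required by the demo):
# caption one of the downloaded example images before launching the UI.
from PIL import Image

print("smoke test:", generate_captions(Image.open("cats.jpg")))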

examples = [["cats.jpg"], ["stop_sign.png"], ["astronaut.jpg"]]
outputs = [gr.Textbox(label="Caption generated by GIT-Large COCO")]
title = "Interactive demo: GIT-Large COCO image captioning"
description = "GIT-Large COCO image captioning"
article = "<p style='text-align: center'><a href='https://huggingface.co/docs/transformers/model_doc/blip' target='_blank'>BLIP docs</a> | <a href='https://huggingface.co/docs/transformers/model_doc/git' target='_blank'>GIT docs</a></p>"

interface = gr.Interface(fn=generate_captions,
                         inputs=gr.Image(type="pil"),
                         outputs=outputs,
                         examples=examples,
                         title=title,
                         description=description,
                         article=article)
interface.queue().launch(debug=True)