#!/usr/bin/env python

from __future__ import annotations

import gradio as gr
import PIL.Image
import spaces
import torch
from transformers import AutoProcessor, BlipForConditionalGeneration

DESCRIPTION = "# Image Captioning with BLIP"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the BLIP captioning model and its processor once at startup.
model_id = "Salesforce/blip-image-captioning-large"
processor = AutoProcessor.from_pretrained(model_id)
model = BlipForConditionalGeneration.from_pretrained(model_id).to(device)


@spaces.GPU
def run(image: PIL.Image.Image, text: str = "A picture of") -> str:
    # `text` is a prompt prefix: passing its tokens along with the pixel values
    # makes BLIP continue the phrase conditioned on the image.
    inputs = processor(images=image, text=text, return_tensors="pt").to(device)
    generated_ids = model.generate(**inputs, num_beams=3, max_length=20, min_length=5)
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_caption


with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    input_image = gr.Image(type="pil")
    text = gr.Textbox(label="Text", value="A picture of")
    run_button = gr.Button("Caption")
    output = gr.Textbox(label="Result")

    # Run captioning on button click or textbox submit, and expose the
    # endpoint to API clients under the name "caption".
    gr.on(
        triggers=[text.submit, run_button.click],
        fn=run,
        inputs=[input_image, text],
        outputs=output,
        api_name="caption",
    )

if __name__ == "__main__":
    demo.queue(max_size=20).launch()
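
# A minimal sketch of calling the "caption" endpoint remotely with gradio_client.
# The Space name "your-username/image-captioning-with-blip" and the local file
# "example.jpg" below are placeholders, not part of this app:
#
#   from gradio_client import Client, handle_file
#
#   client = Client("your-username/image-captioning-with-blip")
#   caption = client.predict(
#       handle_file("example.jpg"),  # input_image
#       "A picture of",              # text prompt prefix
#       api_name="/caption",
#   )
#   print(caption)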