import gradio as gr
import requests  # kept from original; unused after dead-code removal
from PIL import Image  # kept from original; unused after dead-code removal
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the BLIP captioning model once at import time so every request reuses it.
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")


def generate_caption(data, text):
    """Generate a caption for an uploaded image with BLIP.

    Args:
        data: PIL image supplied by the Gradio image widget, or None/falsy
            when the user has not uploaded anything.
        text: optional prefix the caption should start with; may be empty.

    Returns:
        The generated caption string, or a user-facing error message.
    """
    if not data:
        return "Please upload image first"
    try:
        # An empty prefix should trigger unconditional captioning rather than
        # conditioning the decoder on an empty string.
        if text:
            inputs = processor(data, text, return_tensors="pt")
        else:
            inputs = processor(data, return_tensors="pt")
        out = model.generate(**inputs)
        return processor.decode(out[0], skip_special_tokens=True)
    except Exception as e:
        # Keep the UI message friendly; log the real error server-side.
        print(e)
        return "Something went wrong."


iface = gr.Interface(
    fn=generate_caption,
    inputs=[
        gr.Image(label="Image", image_mode="RGB", type="pil"),
        gr.Textbox(label="Start caption with"),
    ],
    outputs=gr.Textbox(label="Generated Caption", show_copy_button=True),
)

if __name__ == "__main__":
    iface.launch()