from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
import gradio as gr

# Initialize the BLIP processor and model
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

def generate_captions(image, text=""):
    # Guard against an empty submission (Gradio passes None when no image is uploaded)
    if image is None:
        return "Please upload an image."
    # Convert the uploaded NumPy array to a PIL image
    raw_image = Image.fromarray(image).convert("RGB")
    if text:
        # Conditional image captioning: the text acts as a prefix the model completes
        inputs = processor(raw_image, text, return_tensors="pt")
    else:
        # Unconditional image captioning
        inputs = processor(raw_image, return_tensors="pt")
    # Generate a caption and decode it to plain text
    out = model.generate(**inputs)
    caption = processor.decode(out[0], skip_special_tokens=True)
    return caption

# Gradio interface
iface = gr.Interface(
    fn=generate_captions,
    inputs=[
        gr.Image(label="Upload/Drag Image"),
        gr.Textbox(label="Conditional Text (optional)", placeholder="Enter conditional text (optional)..."),
    ],
    outputs=gr.Textbox(label="Generated Caption"),
    title="BLIP Image Caption Generator",
    description="This app generates captions for uploaded images. You can also provide conditional text to guide the caption generation.",
)

if __name__ == "__main__":
    iface.launch()
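
# Optional: quick smoke test without launching the Gradio UI.
# A minimal sketch, assuming an image exists at the hypothetical path
# "example.jpg"; uncomment to run generate_captions() directly on it.
#
# import numpy as np
# test_image = np.array(Image.open("example.jpg").convert("RGB"))
# print(generate_captions(test_image))                # unconditional caption
# print(generate_captions(test_image, "a photo of"))  # conditional caption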