# Source: Hugging Face Space by dlaima — commit 052f2f5 (verified)
# app.py
import io
from PIL import Image
import requests
import warnings
import gradio as gr
from transformers import pipeline
# Silence the noisy transformers warning about the model-agnostic default
# `max_length`; it fires on every pipeline call and is not actionable here.
warnings.filterwarnings("ignore", message=".*Using the model-agnostic default `max_length`.*")
# Load the BLIP image-captioning model once at import time via the Hugging Face
# pipeline. NOTE(review): this downloads model weights on first run — the app
# will not start without network access to the Hugging Face Hub.
captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
# Helper function to download/process image and generate caption
def caption_image(image_url):
    """Download the image at *image_url* and return a BLIP-generated caption.

    Parameters
    ----------
    image_url : str
        Publicly reachable URL of an image in any format Pillow can decode.

    Returns
    -------
    str
        The generated caption on success, or a human-readable error message
        ("Error processing image: ...") on any failure — Gradio shows either
        string in the output box, so this function never raises.
    """
    try:
        # Timeout so a dead or slow host cannot hang the request (and the
        # Gradio worker serving it) forever.
        response = requests.get(image_url, timeout=30)
        response.raise_for_status()
        # Force RGB: BLIP expects 3-channel input (handles RGBA PNGs, etc.).
        image = Image.open(io.BytesIO(response.content)).convert("RGB")
        # The pipeline returns a list of dicts: [{"generated_text": ...}].
        return captioner(image)[0]["generated_text"]
    except Exception as e:
        # App-boundary handler: surface the error as text instead of crashing
        # the UI (covers network errors, bad URLs, non-image payloads).
        return f"Error processing image: {str(e)}"
# Gradio interface with JPEG examples
demo = gr.Interface(
fn=caption_image,
inputs=gr.Textbox(label="Image URL"),
outputs="text",
title="Image Captioning App",
description=(
"Upload an image or use one of the predefined examples to generate a caption. "
"This app uses `Salesforce/blip-image-captioning-base`."
),
examples=[
['https://free-images.com/lg/9e46/white_bengal_tiger_tiger_0.jpg']
],
flagging_mode="never"
)
# Start the Gradio server only when run as a script (not when imported,
# e.g. by the Spaces runtime that may launch the app itself).
if __name__ == "__main__":
    demo.launch()