import torch
from transformers import BlipProcessor, BlipForConditionalGeneration
import gradio as gr
from PIL import Image
# Load the BLIP image-captioning model (uses GPU if available, else CPU)
device = "cuda" if torch.cuda.is_available() else "cpu"
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base").to(device)
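# Note: on a CUDA device, memory use can be roughly halved by loading the
# weights in half precision, e.g. passing torch_dtype=torch.float16 to
# from_pretrained(); the default float32 is kept here for CPU compatibility.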
# Define the caption generation function
def generate_caption(image):
    # Guard against empty submissions from the Gradio UI
    if image is None:
        return "Please upload an image first."
    # BLIP expects RGB input; convert other modes (e.g. RGBA, grayscale)
    if isinstance(image, Image.Image) and image.mode != "RGB":
        image = image.convert("RGB")
    inputs = processor(images=image, return_tensors="pt").to(device)
    with torch.no_grad():  # inference only; skip gradient tracking
        output_ids = model.generate(**inputs)
    return processor.decode(output_ids[0], skip_special_tokens=True)
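# Note: model.generate() above uses greedy decoding with default length limits;
# longer or more varied captions can be requested with standard transformers
# generation kwargs, e.g. model.generate(**inputs, max_new_tokens=50, num_beams=3).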
# Gradio Interface
iface = gr.Interface(
    fn=generate_caption,
    inputs=gr.Image(type="pil"),
    outputs=gr.Textbox(label="Generated Caption"),
    title="🖼️ Auto Image Captioning",
    description="Upload an image, and let AI caption it with BLIP!",
)
# Launch Gradio App (no share=True needed for Hugging Face Spaces)
iface.launch()
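# Quick local sanity check without the Gradio UI ("example.jpg" is a
# hypothetical test file in the working directory):
#   from PIL import Image
#   print(generate_caption(Image.open("example.jpg")))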