import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration
import torch

# Load the BLIP processor and captioning model
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large")

# Run inference on a GPU when one is available; otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def caption_image(image):
    # Preprocess the PIL image into model-ready tensors on the chosen device
    inputs = processor(images=image, return_tensors="pt").to(device)

    # Generate caption token IDs (no gradient tracking needed at inference)
    with torch.no_grad():
        out = model.generate(**inputs)

    # Decode the generated token IDs into a plain-text caption
    caption = processor.decode(out[0], skip_special_tokens=True)

    return caption

# Set up the Gradio interface
interface = gr.Interface(
    fn=caption_image,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="Image Captioning",
    description="Generate captions for images using the BLIP model."
)

# Launch the interface
interface.launch()
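
# Optional: interface.launch(share=True) additionally serves the app
# through a temporary public Gradio URL, which is handy for quick demos.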