from PIL import Image
import gradio as gr
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the fine-tuned BLIP captioning model and its matching processor.
model = BlipForConditionalGeneration.from_pretrained('jaimin/Imagecap')
processor = BlipProcessor.from_pretrained('jaimin/Imagecap')
def predict(image, max_length=64, num_beams=4):
    # BLIP expects RGB input; convert in case the upload is grayscale or RGBA.
    image = image.convert('RGB')
    inputs = processor(image, return_tensors="pt")
    # Generate caption token ids with beam search, then decode them back to text.
    caption_ids = model.generate(**inputs, max_length=max_length, num_beams=num_beams)
    return processor.decode(caption_ids[0], skip_special_tokens=True)
input = gr.Image(label="Upload your Image", type='pil')
output = gr.Textbox(label="Captions")
title = "ImageCap"

interface = gr.Interface(
    fn=predict,
    inputs=input,
    outputs=output,
    title=title,
)
interface.launch(debug=True)
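
# Optional sanity check outside the Gradio UI: predict can be called directly
# with a PIL image. The file name below is only a placeholder, not part of the Space.
# img = Image.open("example.jpg")
# print(predict(img))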