import pathlib

import gradio as gr
import open_clip
import torch

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Load the open_clip CoCa checkpoint (ViT-B/32 image tower, LAION-2B pretraining)
# together with its matching image preprocessing transform.
model, _, transform = open_clip.create_model_and_transforms(
    "coca_ViT-B-32",
    pretrained="laion2b_s13b_b90k"
)
model.to(device)
model.eval()
def output_generate(image):
    # Caption generation with open_clip's default sampling settings, capped at 20 tokens.
    im = transform(image).unsqueeze(0).to(device)
    with torch.no_grad(), torch.cuda.amp.autocast():
        generated = model.generate(im, seq_len=20)
    # Decode the token ids and strip the special start/end-of-text markers.
    return open_clip.decode(generated[0].detach()).split("<end_of_text>")[0].replace("<start_of_text>", "")
def inference_caption(image):
    # Preprocess the PIL image into a single-item batch on the target device.
    im = transform(image).unsqueeze(0).to(device)
    # Autocast enables mixed precision on GPU; it has no effect on CPU ops.
    with torch.no_grad(), torch.cuda.amp.autocast():
        generated = model.generate(
            im,
            generation_type="beam_search",
            top_p=1.0,
            min_seq_len=30,
            seq_len=100,
            repetition_penalty=1.2
        )
    # Decode the token ids and strip the special start/end-of-text markers.
    return open_clip.decode(generated[0].detach()).split("<end_of_text>")[0].replace("<start_of_text>", "")
# Build the Gradio interface: a PIL image in, a caption string out.
image_input = gr.Image(type="pil")
caption_output = gr.Textbox(label="Caption Output")
caption_interface = gr.Interface(
    fn=inference_caption,
    inputs=image_input,
    outputs=caption_output,
    title="CoCa: Contrastive Captioners",
    description="An open source implementation of CoCa: Contrastive Captioners are Image-Text Foundation Models.",
    examples=[path.as_posix() for path in sorted(pathlib.Path("images").glob("*.jpg"))],
    allow_flagging="never"
)
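# Optional sanity check before launching the UI (a minimal sketch, assuming the
# images/ folder used for the examples above contains at least one .jpg):
# caption the first example image directly, without going through the web UI.
from PIL import Image
_example_jpgs = sorted(pathlib.Path("images").glob("*.jpg"))
if _example_jpgs:
    print("sample caption:", inference_caption(Image.open(_example_jpgs[0]).convert("RGB")))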
caption_interface.launch()