import pathlib

import gradio as gr
import open_clip
import torch

# Run on GPU when available; CPU inference works but is much slower.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the CoCa ViT-L/14 model with its matching image preprocessing transform.
model, _, transform = open_clip.create_model_and_transforms(
    "coca_ViT-L-14",
    pretrained="mscoco_finetuned_laion2B-s13B-b90k",
)
model.to(device)


def output_generate(image):
    # Preprocess the PIL image and add a batch dimension.
    im = transform(image).unsqueeze(0).to(device)
    # Autoregressively generate caption tokens (mixed precision on CUDA;
    # autocast is a no-op on CPU).
    with torch.no_grad(), torch.cuda.amp.autocast():
        generated = model.generate(im, seq_len=20)
    # Decode the token ids and strip the special start/end-of-text markers.
    return (
        open_clip.decode(generated[0].detach())
        .split("<end_of_text>")[0]
        .replace("<start_of_text>", "")
    )


paths = sorted(pathlib.Path("images").glob("*.jpg"))

iface = gr.Interface(
    fn=output_generate,
    inputs=gr.Image(label="Input image", type="pil"),
    outputs=gr.Text(label="Caption output"),
    title="CoCa: Contrastive Captioners",
    description=(
        """
An open-source implementation of CoCa: Contrastive Captioners are Image-Text
Foundation Models (https://arxiv.org/abs/2205.01917).

Built using open_clip, with an effort from LAION.

For faster inference without waiting in a queue, you may duplicate this Space
and upgrade to a GPU in the settings.
"""
    ),
    examples=[path.as_posix() for path in paths],
)
iface.launch()
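
# Quick local check (a sketch, not part of the original Space): caption a
# single image from disk without launching the Gradio UI. The path
# "images/example.jpg" is a hypothetical placeholder.
#
#     from PIL import Image
#     caption = output_generate(Image.open("images/example.jpg").convert("RGB"))
#     print(caption)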