import pathlib

import gradio as gr
import open_clip
import torch

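# Prefer GPU when available; fall back to CPU.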
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

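# Load the CoCa ViT-L/14 checkpoint fine-tuned for captioning on MSCOCO.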
model, _, transform = open_clip.create_model_and_transforms(
    "coca_ViT-L-14",
    pretrained="mscoco_finetuned_laion2B-s13B-b90k"
)
model.to(device)
model.eval()  # inference only; disables dropout


def output_generate(image):
    # Preprocess the PIL image into a batched tensor on the target device.
    im = transform(image).unsqueeze(0).to(device)
    # Autocast only on CUDA; torch.cuda.amp.autocast would warn on CPU-only hosts.
    with torch.no_grad(), torch.autocast(device_type=device.type, enabled=device.type == "cuda"):
        generated = model.generate(im, seq_len=20)
    # Decode the generated token ids and strip the special start/end tokens.
    caption = open_clip.decode(generated[0].detach())
    return caption.split("<end_of_text>")[0].replace("<start_of_text>", "")


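# Bundled example images shown in the demo's examples gallery.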
paths = sorted(pathlib.Path("images").glob("*.jpg"))

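# Wire the captioning function into a simple Gradio interface.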
iface = gr.Interface(
    fn=output_generate,
    inputs=gr.Image(label="Input image", type="pil"),
    outputs=gr.Text(label="Caption output"),
    title="CoCa: Contrastive Captioners",
    description=(
        "An open source implementation of **CoCa: Contrastive Captioners are Image-Text Foundation Models** "
        "(https://arxiv.org/abs/2205.01917). "
        "Built with [open_clip](https://github.com/mlfoundations/open_clip), with support from [LAION](https://laion.ai/)."
    ),
    examples=[path.as_posix() for path in paths],
)
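# launch() starts a local server; when running outside a hosted Space,
# passing share=True can also create a temporary public link.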
iface.launch()