import sys

import gradio as gr
import jax
from huggingface_hub import snapshot_download
from PIL import Image
from transformers import AutoTokenizer

# Download the model repo locally and put it on sys.path so that its
# bundled modules (modeling_hybrid_clip, test_on_image) can be imported.
LOCAL_PATH = snapshot_download("flax-community/clip-spanish")
sys.path.append(LOCAL_PATH)

from modeling_hybrid_clip import FlaxHybridCLIP
from test_on_image import prepare_image, prepare_text
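
# Given an image and a Spanish caption, this Gradio demo uses the
# flax-community/clip-spanish hybrid CLIP model to score how well the
# caption describes the image.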


def save_file_to_disk(uploaded_file):
    """Persist the uploaded image (a numpy array from Gradio) to a temp file."""
    temp_file = "/tmp/image.jpeg"
    im = Image.fromarray(uploaded_file)
    im.save(temp_file)
    return temp_file


def run_inference(image_path, text, model, tokenizer):
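    """Encode the caption and image with the CLIP model and return a score.

    The image-text logit is passed through a sigmoid, so the returned
    value is a similarity score in [0, 1].
    """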
    pixel_values = prepare_image(image_path, model)
    input_text = prepare_text(text, tokenizer)
    model_output = model(
        input_text["input_ids"],
        pixel_values,
        attention_mask=input_text["attention_mask"],
        train=False,
        return_dict=True,
    )
    logits = model_output["logits_per_image"]
    score = jax.nn.sigmoid(logits)[0][0]
    return score


def load_tokenizer_and_model():
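    """Load the Spanish text tokenizer and the saved hybrid CLIP model."""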
    # Tokenizer for the Spanish text encoder (BERTIN RoBERTa)
    tokenizer = AutoTokenizer.from_pretrained(
        "bertin-project/bertin-roberta-base-spanish"
    )
    model = FlaxHybridCLIP.from_pretrained(LOCAL_PATH)
    return tokenizer, model


tokenizer, model = load_tokenizer_and_model()


def score_image_caption_pair(uploaded_file, text_input):
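    """Gradio callback: save the uploaded image, run inference, and return
    the score both as a label dict and as a formatted string.
    """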
    local_image_path = save_file_to_disk(uploaded_file)
    score = run_inference(local_image_path, text_input, model, tokenizer).tolist()
    return {"Score": score}, "{:.2f}".format(score)


# Wire up the Gradio UI: an image input (resized to 299x299) and a text box,
# with outputs shown as a label widget and a two-decimal score string.
# Note: gr.inputs.Image is the legacy (pre-3.0) Gradio input API.
image = gr.inputs.Image(shape=(299, 299))
iface = gr.Interface(
    fn=score_image_caption_pair, inputs=[image, "text"], outputs=["label", "text"]
)
iface.launch()