File size: 3,389 Bytes
b36fb35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import gradio as gr
from transformers import pipeline
import os

MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14 = "openai/clip-vit-large-patch14"
MODEL_OPENAI_CLIP_VIT_BASE_PATCH_16 = "openai/clip-vit-base-patch16"
MODEL_OPENAI_CLIP_VIT_BASE_PATCH_32 = "openai/clip-vit-base-patch32"

input_examles = [
    [
        os.path.join(os.path.dirname(__file__), "images/example-01-two-cats.jpg"), 
        'the photo contains one cat,'   \
        'the photo contains a animal,'  \
        'the photo contains two cats,'  \
        'the photo contains something else',
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14
    ],
    [
        os.path.join(os.path.dirname(__file__), "images/example-02-two-cats.jpg"), 
        'the photo contains one cat,'   \
        'the photo contains a animal,'  \
        'the photo contains two cats,'  \
        'the photo contains something else',
        MODEL_OPENAI_CLIP_VIT_BASE_PATCH_16
    ],
    [
        os.path.join(os.path.dirname(__file__), "images/example-03-woman-with-pink-shirt-and-a-printer.jpg"), 
        'the photo contains a man and a printer,'                       \
        'the photo contains a woman with blue shirt and a printer,'     \
        'the photo contains a woman and a printer,'                     \
        'the photo contains a printer,'                                 \
        'the photo contains a woman with pink shirt and a printer,'     \
        'the photo contains something else',
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14
    ],
    [
        os.path.join(os.path.dirname(__file__), "images/example-04-printer-in-the-left.jpg"), 
        'the photo contains a printer in the left,'     \
        'the photo contains a printer in the right,'    \
        'the photo contains a printer in the middle'    \
        'the photo contains something else',
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14
    ],
    [
        os.path.join(os.path.dirname(__file__), "images/example-05-car-in-the-left-and-motorbike-in-the-right.jpg"), 
        'the photo contains a car in the left and a motorbike in the right,'     \
        'the photo contains a car in the right and a motorbike in the left',
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14
    ]
]

def launch(image_input, labels_input, dropdown_input):
    
    return predict(image_input, labels_input, dropdown_input)

def predict(image_input, labels_input, dropdown_input):
    
    labels_candidate = labels_input.split(",")
    
    selected_model = f"""{dropdown_input}"""
    pipe = pipeline(task="zero-shot-image-classification",  model=selected_model)
    
    predictions = pipe(image_input, candidate_labels=labels_candidate)
    
    return {p["label"]: p["score"] for p in predictions}

# SYSTEM INPUTS
image_input = gr.Image(label="Upload iamge candidate", type="filepath")
text_input = gr.Textbox(type="text", label="Possible classes to detect.")
dropdown_input = gr.Dropdown(
    [
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14,
        MODEL_OPENAI_CLIP_VIT_BASE_PATCH_16,
        MODEL_OPENAI_CLIP_VIT_BASE_PATCH_32
    ], 
    label="OpenAI ClipVit Model")

# SYSTEM OUTPUTS
output_label = gr.outputs.Label()

demo = gr.Interface(
    launch,
    inputs=[image_input, text_input, dropdown_input],
    outputs=output_label,
    title="Demo aobut zero-shot-image-classification",
    examples = input_examles
)

if __name__ == "__main__":
    demo.launch()