# Hugging Face Space: zero-shot image classification demo (CLIP ViT models).
import gradio as gr | |
from transformers import pipeline | |
import os | |
# Hugging Face Hub identifiers for the selectable OpenAI CLIP checkpoints.
MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14 = "openai/clip-vit-large-patch14"
MODEL_OPENAI_CLIP_VIT_BASE_PATCH_16 = "openai/clip-vit-base-patch16"
MODEL_OPENAI_CLIP_VIT_BASE_PATCH_32 = "openai/clip-vit-base-patch32"

# Example rows for the Gradio interface. Each row is
# [image file path, comma-separated candidate labels, model id].
# Adjacent string literals are concatenated, so every label except the last
# must end with a trailing comma inside the string itself.
input_examles = [
    [
        os.path.join(os.path.dirname(__file__), "images/example-01-two-cats.jpg"),
        "the photo contains one cat,"
        "the photo contains an animal,"
        "the photo contains two cats,"
        "the photo contains something else",
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14,
    ],
    [
        os.path.join(os.path.dirname(__file__), "images/example-02-two-cats.jpg"),
        "the photo contains one cat,"
        "the photo contains an animal,"
        "the photo contains two cats,"
        "the photo contains something else",
        MODEL_OPENAI_CLIP_VIT_BASE_PATCH_16,
    ],
    [
        os.path.join(os.path.dirname(__file__), "images/example-03-woman-with-pink-shirt-and-a-printer.jpg"),
        "the photo contains a man and a printer,"
        "the photo contains a woman with blue shirt and a printer,"
        "the photo contains a woman and a printer,"
        "the photo contains a printer,"
        "the photo contains a woman with pink shirt and a printer,"
        "the photo contains something else",
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14,
    ],
    [
        os.path.join(os.path.dirname(__file__), "images/example-04-printer-in-the-left.jpg"),
        # BUG FIX: the original was missing the comma after "middle", which
        # silently merged two candidate labels into one via string concatenation.
        "the photo contains a printer in the left,"
        "the photo contains a printer in the right,"
        "the photo contains a printer in the middle,"
        "the photo contains something else",
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14,
    ],
    [
        os.path.join(os.path.dirname(__file__), "images/example-05-car-in-the-left-and-motorbike-in-the-right.jpg"),
        "the photo contains a car in the left and a motorbike in the right,"
        "the photo contains a car in the right and a motorbike in the left",
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14,
    ],
]
def launch(image_input, labels_input, dropdown_input):
    """Gradio entry point; delegates straight to :func:`predict`.

    Kept as a separate callable so the interface wiring and the
    inference logic stay independently editable.
    """
    result = predict(image_input, labels_input, dropdown_input)
    return result
def predict(image_input, labels_input, dropdown_input):
    """Run zero-shot image classification on one image.

    Args:
        image_input: Path to the image file (Gradio `type="filepath"`).
        labels_input: Comma-separated candidate labels typed by the user.
        dropdown_input: Hugging Face model id selected in the dropdown.

    Returns:
        dict mapping each candidate label to its predicted score,
        suitable for a `gr.Label` output component.
    """
    # Robustness: strip whitespace around labels and drop empty entries
    # (e.g. a trailing comma in the textbox).
    labels_candidate = [
        label.strip() for label in labels_input.split(",") if label.strip()
    ]
    selected_model = str(dropdown_input)
    # Cache pipelines per model so the (large) checkpoint is loaded only
    # once per process instead of on every request.
    cache = getattr(predict, "_pipeline_cache", None)
    if cache is None:
        cache = {}
        predict._pipeline_cache = cache
    if selected_model not in cache:
        cache[selected_model] = pipeline(
            task="zero-shot-image-classification", model=selected_model
        )
    predictions = cache[selected_model](image_input, candidate_labels=labels_candidate)
    return {p["label"]: p["score"] for p in predictions}
# SYSTEM INPUTS
# Typo fix: "iamge" -> "image" in the user-visible label.
image_input = gr.Image(label="Upload image candidate", type="filepath")
text_input = gr.Textbox(type="text", label="Possible classes to detect.")
dropdown_input = gr.Dropdown(
    [
        MODEL_OPENAI_CLIP_VIT_LARGE_PATCH_14,
        MODEL_OPENAI_CLIP_VIT_BASE_PATCH_16,
        MODEL_OPENAI_CLIP_VIT_BASE_PATCH_32,
    ],
    label="OpenAI ClipVit Model",
)
# SYSTEM OUTPUTS
# `gr.outputs.Label` is the legacy pre-3.x API, removed in current Gradio;
# the modern component class matches the gr.Image/gr.Textbox style used above.
output_label = gr.Label()
# Wire inputs, output, and example rows into the Gradio interface.
# Typo fix: "aobut" -> "about" in the user-visible title.
demo = gr.Interface(
    launch,
    inputs=[image_input, text_input, dropdown_input],
    outputs=output_label,
    title="Demo about zero-shot-image-classification",
    examples=input_examles,
)
# Start the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()