from typing import List
import gradio as gr
import numpy as np
import torch
from transformers import CLIPProcessor, CLIPModel
IMAGENET_CLASSES_FILE = "imagenet-classes.txt"
EXAMPLES = ["dog.jpeg", "car.png"]
MARKDOWN = """
# Zero-Shot Image Classification with MetaCLIP
This is the demo for a zero-shot image classification model based on
[MetaCLIP](https://github.com/facebookresearch/MetaCLIP), described in the paper
[Demystifying CLIP Data](https://arxiv.org/abs/2309.16671) that formalizes CLIP data
curation as a simple algorithm.
"""
def load_text_lines(file_path: str) -> List[str]:
    with open(file_path, 'r') as file:
        lines = file.readlines()
    return [line.rstrip() for line in lines]
model = CLIPModel.from_pretrained("facebook/metaclip-b32-400m")
processor = CLIPProcessor.from_pretrained("facebook/metaclip-b32-400m")
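# Optional sketch (an assumption, not part of the original demo): on a
# machine with a CUDA GPU, the model could be moved to the device; the
# tensors returned by the processor would then need .to(device) as well.
#   device = "cuda" if torch.cuda.is_available() else "cpu"
#   model = model.to(device)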
imagenet_classes = load_text_lines(IMAGENET_CLASSES_FILE)
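# IMAGENET_CLASSES_FILE is assumed to hold one label per line; in the
# standard ImageNet-1k ordering the first lines would be:
#   tench
#   goldfish
#   great white shark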
def classify_image(input_image) -> str:
    inputs = processor(
        text=imagenet_classes,
        images=input_image,
        return_tensors="pt",
        padding=True)
    # Inference only; gradient tracking is unnecessary.
    with torch.no_grad():
        outputs = model(**inputs)
    # Image-text similarity logits, normalized into per-class probabilities.
    probs = outputs.logits_per_image.softmax(dim=1)
    class_index = np.argmax(probs.detach().numpy())
    return imagenet_classes[class_index]
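# A hedged sketch (not part of the original demo): the same call pattern
# can surface the top-k labels and probabilities instead of only the
# argmax; the helper name and the `k` default are illustrative.
def classify_image_topk(input_image, k: int = 5) -> str:
    inputs = processor(
        text=imagenet_classes,
        images=input_image,
        return_tensors="pt",
        padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # Probabilities for the single input image.
    probs = outputs.logits_per_image.softmax(dim=1)[0]
    top_probs, top_indices = probs.topk(k)
    return "\n".join(
        f"{imagenet_classes[i]}: {p:.2%}"
        for p, i in zip(top_probs.tolist(), top_indices.tolist()))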
with gr.Blocks() as demo:
    gr.Markdown(MARKDOWN)
    with gr.Row():
        image = gr.Image(image_mode='RGB', type='pil')
        output_text = gr.Textbox(label="Output")
    submit_button = gr.Button("Submit")
    submit_button.click(classify_image, inputs=[image], outputs=output_text)
    gr.Examples(
        examples=EXAMPLES,
        fn=classify_image,
        inputs=[image],
        outputs=[output_text],
        cache_examples=True,
        run_on_click=True
    )
demo.launch(debug=False)