# nlp_zeroShot/app.py
from transformers import pipeline
import gradio as gr
# Initialize the zero-shot image classification pipeline with the specified CLIP model
model_name = "openai/clip-vit-large-patch14-336"
classifier = pipeline("zero-shot-image-classification", model=model_name)
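# A minimal sketch of what the pipeline returns, assuming a hypothetical local file
# "example.jpg" (not part of this repo): a list of {"label", "score"} dicts sorted by
# descending score. Kept commented out so that importing this module only builds the pipeline.
# from PIL import Image
# preds = classifier(Image.open("example.jpg"), candidate_labels=["dog", "car", "apple"])
# print(preds[0]["label"], preds[0]["score"])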
def classify_image(image, labels):
    # Parse the comma-separated label string into a clean list of candidate labels
    candidate_labels = [label.strip() for label in labels.split(",")]
    # The pipeline returns a list of {"label", "score"} dicts sorted by descending score
    scores = classifier(image, candidate_labels=candidate_labels)
    result = f"The highest score is {scores[0]['score']:.3f} for the label {scores[0]['label']}"
    print(result)
    return result
# Create the Gradio interface
interface = gr.Interface(
    fn=classify_image,
    inputs=[
        gr.Image(label="Input Image Component", type="pil"),
        gr.Textbox(
            label="Candidate Labels (comma-separated)",
            value="apple, banana, car, dog, elephant",
        ),
    ],
    outputs="text",
    title="Zero-shot image classification",
    description="Classify an image against a comma-separated list of candidate labels.",
)
# Launch the Gradio interface
if __name__ == "__main__":
    interface.launch(
        quiet=True,
    )
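# A sketch of alternative launch options, shown for illustration only (not this app's
# configuration): share=True requests a temporary public URL, while server_name and
# server_port control where the local server binds.
# interface.launch(share=True, server_name="0.0.0.0", server_port=7860)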