import torch
from transformers import AutoModelForImageClassification, AutoFeatureExtractor
import gradio as gr

# Fine-tuned ViT checkpoint on the Hugging Face Hub and its class labels.
model_id = 'omarques/vit-base-patch16-224-finetuned-flower'
labels = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']


def classify_image(image):
    # Load the model and its feature extractor from the Hub.
    model = AutoModelForImageClassification.from_pretrained(model_id)
    feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
    # Preprocess the incoming image and run a forward pass.
    inp = feature_extractor(image, return_tensors='pt')
    outp = model(**inp)
    # Turn the logits into probabilities and map them to label names.
    pred = torch.nn.functional.softmax(outp.logits, dim=-1)
    preds = pred[0].cpu().detach().numpy()
    confidence = {label: float(preds[i]) for i, label in enumerate(labels)}
    return confidence
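
# Optional sanity check (a minimal sketch): assumes Pillow is installed and that
# 'flower-1.jpeg' from the examples below is available locally. Uncomment to call
# classify_image directly before launching the interface.
# from PIL import Image
# print(classify_image(Image.open('flower-1.jpeg')))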

interface = gr.Interface(fn=classify_image,
                         inputs='image',
                         examples=['flower-1.jpeg', 'flower-2.jpeg'],
                         outputs='label')
interface.launch()