__all__ = ['learner', 'labels', 'title', 'description', 'article', 'image', 'label', 'examples', 'interpretation', 'enable_queue',
           'intf', 'classify_image']

from fastai.vision.all import *
import scipy
import gradio as gr

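# Load the exported fastai learner and read the class labels from its DataLoaders vocab.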
learner = load_learner('pet_learner.pkl')
labels = learner.dls.vocab

def classify_image(img):
    """Gradio needs a function that returns a dict mapping each class to its probability.

    It also does not accept fastai tensors, so the probabilities are converted to plain floats.
    """
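    # Convert the incoming image to a fastai PILImage, run inference, and pair each
    # label with its probability (cast to float) for Gradio's Label output.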
    img = PILImage.create(img)
    pred, pred_idx, probs = learner.predict(img)
    return dict(zip(labels, map(float, probs)))

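# Text shown on the demo page: title, short description, and a footer link to the blog post.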
title = "Cat & Dog Breed Classifier" |
|
description = "A pet breed classifier trained on the Oxford Pets dataset with fastai. Created as a demo for Gradio and HuggingFace Spaces." |
|
article="<p style='text-align: center'><a href='https://tmabraham.github.io/blog/gradio_hf_spaces_tutorial' target='_blank'>Blog post</a></p>" |
|
|
|
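# Gradio input/output components and Interface settings: uploads are resized to 512x512
# and the Label output shows the top 3 predicted classes. (Note: `shape`, `interpretation`,
# and `enable_queue` belong to the older, pre-4.0 Gradio API.)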
image = gr.components.Image(shape=(512, 512))
label = gr.components.Label(num_top_classes=3)
examples = ['shiba.jpeg', 'yorkshire_terrier.jpeg']
interpretation = 'default'
enable_queue = True

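# Wire the prediction function, components, examples, and page text into a Gradio Interface.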
intf = gr.Interface(classify_image,
                    inputs=image,
                    outputs=label,
                    examples=examples,
                    title=title,
                    description=description,
                    article=article,
                    interpretation=interpretation)

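# Queue incoming requests so long-running predictions don't time out.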
intf.launch(enable_queue=enable_queue)