import gradio as gr
from fastai.vision.all import load_learner, PILImage

def is_cat(x):
    # Label function used when the model was trained; it must be defined here
    # so load_learner can unpickle the exported learner.
    return x[0].isupper()

learn = load_learner("model.pkl")
labels = learn.dls.vocab  # e.g. (False, True) when trained with the is_cat label function
categories = ("Dog", "Cat")

def predict(img):
    img = PILImage.create(img)
    _, _, probs = learn.predict(img)
    return dict(zip(categories, map(float, probs)))
    # return {labels[i]: float(probs[i]) for i in range(len(labels))}
title = "Cat or Dog Classifier"
description = "Classifier to determine if a photo is a cat or not."
article = "<p style='text-align: center'><a href='https://tmabraham.github.io/blog/gradio_hf_spaces_tutorial' target='_blank'>Blog post</a></p>"
examples = ["siamese.PNG", "poodle.jpg", "panda.jpg"]
interpretation = "default"
enable_queue = True
# Note: shape=, interpretation=, and enable_queue= follow the older
# Gradio 3.x API and were removed in later releases.
gr.Interface(
    fn=predict,
    inputs=gr.components.Image(shape=(512, 512)),
    outputs=gr.components.Label(num_top_classes=3),
    title=title,
    description=description,
    article=article,
    examples=examples,
    interpretation=interpretation,
).launch(enable_queue=enable_queue)
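
The app assumes model.pkl is a fastai learner exported with learn.export and trained with a labeling function named is_cat, which is why that function is re-declared above before load_learner is called. A minimal sketch of how such a model could be produced (the dataset, architecture, and epoch count here are illustrative assumptions, not taken from this Space):

from fastai.vision.all import (
    untar_data, URLs, get_image_files, ImageDataLoaders, Resize,
    vision_learner, resnet34, error_rate,
)

def is_cat(x):
    # Same label function name as in app.py; load_learner looks it up
    # when unpickling the exported learner.
    return x[0].isupper()

path = untar_data(URLs.PETS) / "images"
dls = ImageDataLoaders.from_name_func(
    path, get_image_files(path), valid_pct=0.2, seed=42,
    label_func=is_cat, item_tfms=Resize(192),
)
learn = vision_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(3)
learn.export("model.pkl")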