from pathlib import Path

import gradio as gr
from fastai.vision.all import *
from huggingface_hub import from_pretrained_fastai

pascal_source = Path('.')
EXAMPLES_PATH = Path('./examples')
repo_id = "hugginglearners/multi-object-classification"


# These helpers must be importable at module level so the exported learner
# can be unpickled (they were used to build the DataLoaders at training time).
def get_x(x):
    return pascal_source / "train" / f'{x[0]}'


def get_y(x):
    return x[1].split(' ')


learner = from_pretrained_fastai(repo_id)
labels = learner.dls.vocab


def infer(img):
    img = PILImage.create(img)
    _pred, _pred_idx, probs = learner.predict(img)
    # Gradio doesn't accept tensors, so convert each probability to a float
    labels_probs = {labels[i]: float(probs[i]) for i, _ in enumerate(labels)}
    return labels_probs


# Input: an image resized to 192x192
inputs = gr.inputs.Image(shape=(192, 192))
# Output: a label component showing the three most probable classes
output = gr.outputs.Label(num_top_classes=3)

# It's good practice to pass examples, a description and a title to guide users
title = 'Multilabel Image Classification'
description = 'Detect which types of objects appear in the image'
article = "Author: Nhu Hoang."
examples = [f'{EXAMPLES_PATH}/{f.name}' for f in EXAMPLES_PATH.iterdir()]

gr.Interface(
    infer,
    inputs,
    output,
    examples=examples,
    allow_flagging='never',
    title=title,
    description=description,
    article=article,
    live=False,
).launch(enable_queue=True, debug=False, inbrowser=False)