import os

import gradio as gr
import numpy as np

from hugsvision.inference.TorchVisionClassifierInference import TorchVisionClassifierInference

# Models available for inference
models_name = ["VGG16", "ShuffleNetV2", "mobilenet_v2"]

colname = "mobilenet_v2"
radio = gr.inputs.Radio(models_name, default="mobilenet_v2", type="value", label=colname)
print(radio.label)


def predict_image(image, model_name):
    # Normalize the pixel values and add a batch dimension
    image = np.array(image) / 255
    image = np.expand_dims(image, axis=0)

    # Load the classifier selected in the radio input
    classifier = TorchVisionClassifierInference(
        model_path="./models/" + model_name + ".pth",
    )

    pred = classifier.predict(img=image)
    return pred


# Read the class labels (semicolon-separated) from categories.txt
with open("categories.txt", "r") as categories:
    labels = categories.readline().split(";")

image = gr.inputs.Image(shape=(300, 300), label="Upload Your Image Here")
label = gr.outputs.Label(num_top_classes=len(labels))

# Each example must supply a value for every input: the image path and the model name
samples = [
    ["./samples/basking.jpg", "mobilenet_v2"],
    ["./samples/blacktip.jpg", "mobilenet_v2"],
]
# Additional sample images available: blue.jpg, bull.jpg, hammerhead.jpg, lemon.jpg,
# mako.jpg, nurse.jpg, sand tiger.jpg, thresher.jpg, tigre.jpg, whale.jpg, white.jpg,
# whitetip.jpg

interface = gr.Interface(
    fn=predict_image,
    inputs=[image, radio],
    outputs=label,
    capture_session=True,
    allow_flagging=False,
    examples=samples,
)

interface.launch()