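# Gradio demo that classifies an uploaded image with a pretrained ImageNet
# classification model in ONNX format (bvlcalexnet-7.onnx), loaded and run
# through MXNet's ONNX import utilities.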
import mxnet as mx
import matplotlib.pyplot as plt
import numpy as np
from collections import namedtuple
from mxnet.gluon.data.vision import transforms
from mxnet.contrib.onnx.onnx2mx.import_model import import_model
import os
import gradio as gr
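# Download a sample test image and the ImageNet synset file mapping class indices to labels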
mx.test_utils.download('https://s3.amazonaws.com/model-server/inputs/kitten.jpg')
mx.test_utils.download('https://s3.amazonaws.com/onnx-model-zoo/synset.txt')
with open('synset.txt', 'r') as f:
    labels = [l.rstrip() for l in f]
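# Fetch the pretrained ONNX model file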
os.system("wget https://github.com/AK391/models/raw/main/vision/classification/alexnet/model/bvlcalexnet-7.onnx")
# Import the ONNX model into MXNet (network symbol plus parameters)
sym, arg_params, aux_params = import_model('bvlcalexnet-7.onnx')
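# Lightweight container matching the batch format expected by Module.forward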
Batch = namedtuple('Batch', ['data'])
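# Read an image from disk as an MXNet NDArray, optionally displaying it with matplotlib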
def get_image(path, show=False):
    img = mx.image.imread(path)
    if img is None:
        return None
    if show:
        plt.imshow(img.asnumpy())
        plt.axis('off')
    return img
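# Standard ImageNet preprocessing: resize, center-crop to 224x224, convert to tensor,
# normalize with ImageNet statistics, and add a batch dimension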
def preprocess(img):
    transform_fn = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
    img = transform_fn(img)
    img = img.expand_dims(axis=0)
    return img
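# Run a forward pass and return the top-5 class labels with their softmax probabilities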
def predict(path):
    img = get_image(path, show=True)
    img = preprocess(img)
    mod.forward(Batch([img]))
    # Take softmax to convert raw scores into probabilities
    scores = mx.ndarray.softmax(mod.get_outputs()[0]).asnumpy()
    # Collect the top-5 predicted classes and their probabilities
    scores = np.squeeze(scores)
    a = np.argsort(scores)[::-1]
    results = {}
    for i in a[0:5]:
        results[labels[i]] = float(scores[i])
    return results
# Determine and set context
if len(mx.test_utils.list_gpus()) == 0:
    ctx = mx.cpu()
else:
    ctx = mx.gpu(0)
# Load the imported symbol into an MXNet Module and bind the pretrained parameters
mod = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
mod.bind(for_training=False, data_shapes=[('data', (1, 3, 224, 224))],
         label_shapes=mod._label_shapes)
mod.set_params(arg_params, aux_params, allow_missing=True, allow_extra=True)
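# Metadata shown in the Gradio interface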
title="SqueezeNet"
description="SqueezeNet is a small CNN that achieves AlexNet-level accuracy on ImageNet with 50x fewer parameters. SqueezeNet requires less communication across servers during distributed training, needs less bandwidth to export a new model from the cloud to an autonomous car, and is more feasible to deploy on FPGAs and other hardware with limited memory."
examples=[['catonnx.jpg']]
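# Build and launch the Gradio app: image filepath input, top-5 label output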
gr.Interface(predict, gr.inputs.Image(type='filepath'), "label", title=title, description=description, examples=examples).launch(enable_queue=True)