akhaliq (HF staff) committed on
Commit 188d4b8
1 Parent(s): bcc5f4c

Update app.py

Files changed (1)
  1. app.py +53 -58
app.py CHANGED
@@ -1,71 +1,66 @@
- import numpy as np
- import math
  import matplotlib.pyplot as plt
- import onnxruntime as rt
- import cv2
- import json
  import gradio as gr
- from huggingface_hub import hf_hub_download
- import onnxruntime as rt

- modele = hf_hub_download(repo_id="onnx/Inception-1-int8", filename="inception-v1-12-int8.onnx")
- # load the labels text file
- labels = json.load(open("labels_maps.txt", "r"))

- # set image dimensions to 224x224 by resizing and cropping the image from the center
- def pre_process_edgetpu(img, dims):
-     output_height, output_width, _ = dims
-     img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)
-     img = center_crop(img, output_height, output_width)
-     img = np.asarray(img, dtype='float32')
-     # convert pixel values from [0, 255] to floats in [-1.0, 1.0]
-     img -= [127.0, 127.0, 127.0]
-     img /= [128.0, 128.0, 128.0]
      return img

- # resize the image with a proportional scale
- def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
-     height, width, _ = img.shape
-     new_height = int(100. * out_height / scale)
-     new_width = int(100. * out_width / scale)
-     if height > width:
-         w = new_width
-         h = int(new_height * height / width)
-     else:
-         h = new_height
-         w = int(new_width * width / height)
-     img = cv2.resize(img, (w, h), interpolation=inter_pol)
-     return img

- # crop the image around the center based on the given height and width
- def center_crop(img, out_height, out_width):
-     height, width, _ = img.shape
-     left = int((width - out_width) / 2)
-     right = int((width + out_width) / 2)
-     top = int((height - out_height) / 2)
-     bottom = int((height + out_height) / 2)
-     img = img[top:bottom, left:right]
      return img

- sess = rt.InferenceSession('inception-v1-12.onnx')

- def inference(img):
-     img = cv2.imread(img)
-     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
-
-     img = pre_process_edgetpu(img, (224, 224, 3))
-
-     img_batch = np.expand_dims(img, axis=0)
-
-     results = sess.run(["Softmax:0"], {"images:0": img_batch})[0]
-     # top-5 class indices, highest score first
-     result = reversed(results[0].argsort()[-5:])
-     resultdic = {}
-     for r in result:
-         resultdic[labels[str(r)]] = float(results[0][r])
-     return resultdic
-
- title="EfficientNet-Lite4"
- description="EfficientNet-Lite4 is the largest and most accurate variant of the EfficientNet-Lite set of models. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy while still running in real time (e.g. 30 ms/image) on a Pixel 4 CPU."
  examples=[['catonnx.jpg']]
- gr.Interface(inference, gr.inputs.Image(type="filepath"), "label", title=title, description=description, examples=examples).launch()
+ import mxnet as mx
  import matplotlib.pyplot as plt
+ import numpy as np
+ from collections import namedtuple
+ from mxnet.gluon.data.vision import transforms
+ import os
  import gradio as gr

+ from PIL import Image
+ import imageio
+ import onnxruntime as ort

+ def get_image(path):
+     '''
+     Using the path to an image, return the loaded RGB image.
+     '''
+     img = imageio.imread(path, pilmode='RGB')
      return img

+ # Pre-processing function for ImageNet models using numpy
+ def preprocess(img):
+     '''
+     Preprocessing required on the image before inference:
+     takes a loaded image and returns the processed tensor.
+     '''
+     img = np.array(Image.fromarray(img).resize((224, 224))).astype(np.float32)
+     # subtract the per-channel ImageNet means
+     img[:, :, 0] -= 123.68
+     img[:, :, 1] -= 116.779
+     img[:, :, 2] -= 103.939
+     # reorder channels from RGB to BGR
+     img[:, :, [0, 1, 2]] = img[:, :, [2, 1, 0]]
+     # HWC -> CHW, then add a batch dimension
+     img = img.transpose((2, 0, 1))
+     img = np.expand_dims(img, axis=0)
      return img

+ mx.test_utils.download('https://s3.amazonaws.com/model-server/inputs/kitten.jpg')
+
+ mx.test_utils.download('https://s3.amazonaws.com/onnx-model-zoo/synset.txt')
+ # load the ImageNet class labels, one per line
+ with open('synset.txt', 'r') as f:
+     labels = [l.rstrip() for l in f]
+
+ os.system("wget https://github.com/onnx/models/raw/main/vision/classification/inception_and_googlenet/googlenet/model/googlenet-9.onnx")
+
+ ort_session = ort.InferenceSession("googlenet-9.onnx")
+
+ def predict(path):
+     img_batch = preprocess(get_image(path))
+
+     outputs = ort_session.run(
+         None,
+         {"data_0": img_batch.astype(np.float32)},
+     )
+
+     # class indices sorted by score, highest first; keep the top 5
+     a = np.argsort(-outputs[0].flatten())
+     results = {}
+     for i in a[0:5]:
+         results[labels[i]] = float(outputs[0][0][i])
+     return results

+ title="GoogleNet"
+ description="GoogLeNet is a convolutional neural network for image classification that competed in the ImageNet Large Scale Visual Recognition Challenge in 2014."
  examples=[['catonnx.jpg']]
+ gr.Interface(predict, gr.inputs.Image(type='filepath'), "label", title=title, description=description, examples=examples).launch(enable_queue=True, debug=True)
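
The hardcoded input name "data_0" in predict matches googlenet-9.onnx from the ONNX model zoo, but it will break silently if the model file is swapped out. As a sanity check, the graph's input and output names can be read off the session instead; a minimal sketch, assuming googlenet-9.onnx has already been downloaded as above:

import numpy as np
import onnxruntime as ort

# query the session for the graph's actual input/output names instead of hardcoding them
sess = ort.InferenceSession("googlenet-9.onnx")
input_name = sess.get_inputs()[0].name    # "data_0" for googlenet-9.onnx
output_name = sess.get_outputs()[0].name

# feed a dummy batch shaped like the output of preprocess(): (1, 3, 224, 224)
dummy = np.zeros((1, 3, 224, 224), dtype=np.float32)
scores = sess.run([output_name], {input_name: dummy})[0]
print(scores.shape)  # one row of ImageNet class scores, e.g. (1, 1000)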