akhaliq (HF staff) committed
Commit 76ee998
1 Parent(s): 89f5826

Create app.py

Files changed (1):
  app.py +69 -0
app.py ADDED
@@ -0,0 +1,69 @@
+ import numpy as np
+ import math
+ import matplotlib.pyplot as plt
+ import onnxruntime as rt
+ import cv2
+ import json
+ import gradio as gr
+
+ # load the labels map (class index -> label) from the JSON text file
+ labels = json.load(open("labels_map.txt", "r"))
+
+ # set image dimensions to 224x224 by resizing and cropping the image from the center
+ def pre_process_edgetpu(img, dims):
+     output_height, output_width, _ = dims
+     img = resize_with_aspectratio(img, output_height, output_width, inter_pol=cv2.INTER_LINEAR)
+     img = center_crop(img, output_height, output_width)
+     img = np.asarray(img, dtype='float32')
+     # convert pixel values from [0, 255] to a float array in [-1.0, 1.0]
+     img -= [127.0, 127.0, 127.0]
+     img /= [128.0, 128.0, 128.0]
+     return img
+
+ # resize the image with a proportional scale
+ def resize_with_aspectratio(img, out_height, out_width, scale=87.5, inter_pol=cv2.INTER_LINEAR):
+     height, width, _ = img.shape
+     new_height = int(100. * out_height / scale)
+     new_width = int(100. * out_width / scale)
+     if height > width:
+         w = new_width
+         h = int(new_height * height / width)
+     else:
+         h = new_height
+         w = int(new_width * width / height)
+     img = cv2.resize(img, (w, h), interpolation=inter_pol)
+     return img
+
+ # crop the image around the center based on the given height and width
+ def center_crop(img, out_height, out_width):
+     height, width, _ = img.shape
+     left = int((width - out_width) / 2)
+     right = int((width + out_width) / 2)
+     top = int((height - out_height) / 2)
+     bottom = int((height + out_height) / 2)
+     img = img[top:bottom, left:right]
+     return img
+
+ # create the ONNX Runtime inference session for the EfficientNet-Lite4 model
+ sess = rt.InferenceSession('efficientnet-lite4-11.onnx')
+
+ def inference(img):
+     # read the image from disk and convert from OpenCV's BGR to RGB
+     img = cv2.imread(img)
+     img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+
+     img = pre_process_edgetpu(img, (224, 224, 3))
+
+     # add a batch dimension: (224, 224, 3) -> (1, 224, 224, 3)
+     img_batch = np.expand_dims(img, axis=0)
+
+     # run the model and collect the top-5 labels with their softmax scores
+     results = sess.run(["Softmax:0"], {"images:0": img_batch})[0]
+     result = reversed(results[0].argsort()[-5:])
+     resultdic = {}
+     for r in result:
+         resultdic[labels[str(r)]] = float(results[0][r])
+     return resultdic
+
+ # build and launch the Gradio demo: image filepath in, label probabilities out
+ gr.Interface(inference, gr.inputs.Image(type="filepath"), "label").launch()
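For a quick local sanity check of the model I/O that app.py relies on, the sketch below (not part of the commit) feeds a dummy batch to the same ONNX graph; it assumes efficientnet-lite4-11.onnx from this Space is present in the working directory, and the random input merely stands in for what pre_process_edgetpu would produce.

import numpy as np
import onnxruntime as rt

sess = rt.InferenceSession("efficientnet-lite4-11.onnx")

# confirm the graph's tensor names used in app.py ("images:0" in, "Softmax:0" out)
print([i.name for i in sess.get_inputs()], [o.name for o in sess.get_outputs()])

# dummy NHWC batch scaled to [-1, 1], matching the range pre_process_edgetpu produces
dummy = np.random.uniform(-1.0, 1.0, size=(1, 224, 224, 3)).astype(np.float32)
probs = sess.run(["Softmax:0"], {"images:0": dummy})[0][0]
print(probs.shape, probs.argmax())  # softmax over the ImageNet classes; argmax is the predicted class index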