Create app.py
Browse files
app.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr
|
2 |
+
import tensorflow as tf
|
3 |
+
import numpy as np
|
4 |
+
import json
|
5 |
+
from os.path import dirname, realpath, join
|
6 |
+
|
7 |
+
# Load human-readable labels for ImageNet.
# BUG FIX: the original called dirname(realpath(file)) — `file` is undefined
# (NameError at import); the module-path dunder is `__file__`.
current_dir = dirname(realpath(__file__))
with open(join(current_dir, "imagenet_labels.json")) as labels_file:
    # labels is expected to be a list of 1000 class-name strings,
    # indexed by ImageNet class id — TODO confirm against the JSON file.
    labels = json.load(labels_file)

# Instantiate the pretrained MobileNetV2 ImageNet classifier once at import
# time so every prediction reuses the same weights.
mobile_net = tf.keras.applications.MobileNetV2()
13 |
+
def image_classifier(im):
    """Classify one image with MobileNetV2.

    Args:
        im: a single RGB image array (assumed 224x224x3 — the Interface
            input is declared with shape=(224, 224); TODO confirm).

    Returns:
        dict mapping each of the 1000 ImageNet label names to its float
        confidence score.
    """
    # Add the batch dimension: (H, W, C) -> (1, H, W, C).
    arr = np.expand_dims(im, axis=0)
    # CONSISTENCY FIX: the model is MobileNetV2, so use the V2 preprocessing.
    # The original used mobilenet.preprocess_input (V1); both rescale pixels
    # to [-1, 1], so the numeric behavior is unchanged.
    arr = tf.keras.applications.mobilenet_v2.preprocess_input(arr)
    prediction = mobile_net.predict(arr).flatten()
    # ImageNet has exactly 1000 classes; labels[i] is the readable name.
    return {labels[i]: float(prediction[i]) for i in range(1000)}
18 |
+
|
19 |
+
# Build the Gradio demo: one image in, top-3 ImageNet labels out,
# with default interpretation and bundled example images.
iface = gr.Interface(
    fn=image_classifier,
    inputs=gr.inputs.Image(shape=(224, 224)),
    outputs=gr.outputs.Label(num_top_classes=3),
    capture_session=True,
    interpretation="default",
    examples=[["cheetah1.jpg"], ["lion.jpg"], ["ma.jpg"]],
)
30 |
+
|
31 |
+
|
32 |
+
# BUG FIX: the original guard was `if name == "main":` — `name` is undefined
# (NameError at import). The standard script-entry guard uses the dunders.
if __name__ == "__main__":
    # share=True publishes a temporary public Gradio link.
    iface.launch(share=True)