Create app.py
Browse files — Added all of the code for app.py
app.py
ADDED
@@ -0,0 +1,67 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import tensorflow as tf
|
3 |
+
import gradio as gr
|
4 |
+
from huggingface_hub import from_pretrained_keras
|
5 |
+
import cv2
|
6 |
+
import matplotlib.pyplot as plt
|
7 |
+
|
8 |
+
|
9 |
+
# Load the pre-trained Keras classifier from the Hugging Face Hub.
# NOTE(review): this performs a network download at import time — acceptable for
# a Spaces demo, but it means the module cannot be imported offline.
model = from_pretrained_keras("harsha163/CutMix_data_augmentation_for_image_classification")

# functions for inference
# Input side length expected by the model (CIFAR-10-sized images — presumably;
# confirm against the model card).
IMG_SIZE = 32

# Human-readable labels, indexed by the model's output class index.
class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]
|
26 |
+
|
27 |
+
def preprocess_image(image, label):
    """Resize *image* to IMG_SIZE x IMG_SIZE and scale pixels into [0, 1].

    Returns the (image, label) pair unchanged in structure; the label is
    passed through untouched so this can be mapped over (image, label)
    dataset elements.
    """
    resized = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    normalized = tf.image.convert_image_dtype(resized, tf.float32) / 255.0
    return normalized, label
|
32 |
+
|
33 |
+
|
34 |
+
def read_image(image):
    """Convert a raw H x W x 3 image array into a normalized model-input tensor.

    Parameters
    ----------
    image : array-like
        The image as delivered by the Gradio input component (H, W, 3).

    Returns
    -------
    tf.Tensor
        Float tensor resized to IMG_SIZE x IMG_SIZE with values in [0, 1].
    """
    image = tf.convert_to_tensor(image)
    # Pin the rank/channel count so downstream tf ops can infer shapes.
    image.set_shape([None, None, 3])
    # The label argument is ignored for single-image inference; 1 is a dummy.
    image, _ = preprocess_image(image, 1)
    return image
|
45 |
+
|
46 |
+
def infer(input_image):
    """Classify *input_image* and return the predicted class name.

    Parameters
    ----------
    input_image : array-like
        Image from the Gradio input component.

    Returns
    -------
    str
        One of the entries of ``class_names``.
    """
    image_tensor = read_image(input_image)
    # The model expects a batch dimension: (1, IMG_SIZE, IMG_SIZE, 3).
    batch = np.expand_dims(image_tensor, axis=0)
    predictions = np.squeeze(model.predict(batch))
    predicted_index = int(np.argmax(predictions))
    return class_names[predicted_index]
|
55 |
+
|
56 |
+
|
57 |
+
# --- Gradio UI wiring -------------------------------------------------------
# A single image input, resized by Gradio to the model's expected size.
input = gr.inputs.Image(shape=(IMG_SIZE, IMG_SIZE))
# A single label output showing the predicted class.
output = [gr.outputs.Label()]
# It's good practice to pass examples, a title and a description to guide users.
examples = [["./content/examples/Frog.jpeg"], ["./content/examples/Truck.jpeg"]]
title = "Image classification"
description = "Upload an image or select from examples to classify it"

# BUG FIX: the original chained .launch() onto the Interface constructor and
# then called .launch() again on the *return value* of the first launch.
# Build the Interface once, launch exactly once.
gr_interface = gr.Interface(
    infer,
    input,
    output,
    examples=examples,
    allow_flagging=False,
    analytics_enabled=False,
    title=title,
    description=description,
)
gr_interface.launch(enable_queue=True, debug=True)
|