import numpy as np
import tensorflow as tf
import gradio as gr
from huggingface_hub import from_pretrained_keras


model = from_pretrained_keras("keras-io/CutMix_data_augmentation_for_image_classification")
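# the checkpoint is the CIFAR-10 classifier from the Keras CutMix example,
# published on the Hugging Face Hub under the keras-io organization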

# constants for inference: the model expects 32x32 inputs (CIFAR-10 size)
IMG_SIZE = 32

# CIFAR-10 class labels, in the order of the model's output indices
class_names = [
    "Airplane",
    "Automobile",
    "Bird",
    "Cat",
    "Deer",
    "Dog",
    "Frog",
    "Horse",
    "Ship",
    "Truck",
]

# resize the image and scale it to floats in [0, 1]
def preprocess_image(image, label):
    image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
    image = tf.image.convert_image_dtype(image, tf.float32) / 255.0
    return image, label


def read_image(image):
    # convert the incoming numpy array to a 3-channel tensor
    image = tf.convert_to_tensor(image)
    image.set_shape([None, None, 3])
    # reuse the training-time preprocessing; the label argument is a placeholder
    image, _ = preprocess_image(image, 1)
    return image

def infer(input_image):
    # preprocess the uploaded image and add a batch dimension
    image_tensor = read_image(input_image)
    predictions = model.predict(np.expand_dims(image_tensor, axis=0))
    # pick the class with the highest predicted score
    predictions = np.squeeze(predictions)
    predicted_class = int(np.argmax(predictions))
    return class_names[predicted_class]
    
    
# the app takes a single image, resized to IMG_SIZE x IMG_SIZE by Gradio
input = gr.inputs.Image(shape=(IMG_SIZE, IMG_SIZE))
# the app outputs the predicted class label
output = [gr.outputs.Label()]
# it's good practice to pass examples, description and a title to guide users
examples = [["./content/examples/Frog.jpg"], ["./content/examples/Truck.jpg"]] 
title = "Image classification"
description = "Upload an image or select one of the examples to classify it. The supported classes are: Airplane, Automobile, Bird, Cat, Deer, Dog, Frog, Horse, Ship, Truck <p><b>Space author: Harshavardhan</b> <br><b> Keras example author: <a href=\"https://twitter.com/sayannath2350\"> Sayan Nath </a> </b> <br> <a href=\"https://keras.io/examples/vision/cutmix/\">link to the original Keras example</a> </p>"

gr_interface = gr.Interface(infer, input, output, examples=examples, allow_flagging=False, analytics_enabled=False, title=title, description=description)
gr_interface.launch(enable_queue=True, debug=False)