harshaUwm163 committed
Commit 2f91610 · 1 Parent(s): d08d928

added the app.py, images and requirements

app.py ADDED
@@ -0,0 +1,70 @@
+ import numpy as np
+ import tensorflow as tf
+ import gradio as gr
+ from huggingface_hub import from_pretrained_keras
+ import cv2
+ # import matplotlib.pyplot as plt
+
+
+ # load the trained ConvMixer classifier from the Hugging Face Hub
+ model = from_pretrained_keras("harsha163/convMixer")
+
+ # functions for inference
+ IMG_SIZE = 32
+
+ class_names = [
+     "Airplane",
+     "Automobile",
+     "Bird",
+     "Cat",
+     "Deer",
+     "Dog",
+     "Frog",
+     "Horse",
+     "Ship",
+     "Truck",
+ ]
+
+
+ # resize the image to the model's expected input size
+ def preprocess_image(image, label):
+     image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
+     return image, label
+
+
+ # convert the uploaded image to a tensor and preprocess it
+ def read_image(image):
+     image = tf.convert_to_tensor(image)
+     image.set_shape([None, None, 3])
+     image, _ = preprocess_image(image, 1)  # 1 here is a temporary label
+     return image
+
+
+ # run the model on a single image and return the predicted class name
+ def infer(input_image):
+     image_tensor = read_image(input_image)
+     predictions = model.predict(np.expand_dims(image_tensor, axis=0))
+     predictions = np.squeeze(predictions)
+     predictions = np.argmax(predictions)
+     predicted_label = class_names[predictions.item()]
+     return str(predicted_label)
+
+
+ # get the inputs
+ input = gr.inputs.Image(shape=(IMG_SIZE, IMG_SIZE))
+ # the app outputs a single class label
+ output = [gr.outputs.Label()]
+ # it's good practice to pass examples, a description and a title to guide users
+ examples = [["./content/examples/Frog.jpg"], ["./content/examples/Truck.jpg"], ["./content/examples/car.jpg"]]
+ title = "Image classification"
+ description = "Upload an image or select from examples to classify it. This is a <b>ConvMixer model</b> trained on <b>CIFAR-10</b>. The allowed classes are - Airplane, Automobile, Bird, Cat, Deer, Dog, Frog, Horse, Ship, Truck <p><b>Space author: Harshavardhan</b> <br><b> Keras example authors: <a href=\"https://twitter.com/ariG23498\"> Aritra Roy Gosthipaty </a>, <a href=\"https://twitter.com/RisingSayak\"> Sayak Paul </a> </b> <br> <a href=\"https://keras.io/examples/vision/convmixer/\">link to the original Keras example</a> <br> Note: the test accuracy of this model is only ~55%, so expect frequent misclassifications </p>"
+
+ # build the Gradio interface and launch it once
+ gr_interface = gr.Interface(infer, input, output, examples=examples, allow_flagging=False, analytics_enabled=False, title=title, description=description)
+ gr_interface.launch(enable_queue=True, debug=False)
content/examples/Frog.jpg ADDED
content/examples/Truck.jpg ADDED
content/examples/car.jpg ADDED
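
A quick sanity check of the inference path, outside the Gradio UI, is to feed one of the example images added above straight into infer(). This is a minimal sketch rather than part of the commit; it assumes it runs from the repo root, in the same Python session where app.py's functions are defined:

# smoke test (assumption: run from the repo root with app.py's infer() in scope)
import cv2

img = cv2.imread("./content/examples/Frog.jpg")  # OpenCV returns a BGR uint8 array
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)       # match the RGB arrays Gradio hands to infer()
print(infer(img))                                # prints a class name, e.g. "Frog" (model is only ~55% accurate)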
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ tensorflow
+ opencv-python
+ matplotlib
+ numpy
+ huggingface-hub
+ gradio
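
One caveat, not part of this commit: the requirements are unpinned, while app.py relies on the legacy gr.inputs / gr.outputs namespace and the enable_queue launch argument, which later Gradio releases removed. A hedged sketch of a pinned requirements.txt follows; the upper bound is an assumption, not something tested against this Space:

tensorflow
opencv-python
matplotlib
numpy
huggingface-hub
gradio<4  # assumed bound: keeps the legacy gr.inputs/gr.outputs API available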