rahulmishra committed
Commit f5d4540 · 1 parent: 8cca312

Update app.py

Files changed (1): app.py (+277, -0)

app.py
# -*- coding: utf-8 -*-
"""Untitled44.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/18Lqtn6Wg43WV6woldVaN-GMbpBiOTrrC
"""

import zipfile
import urllib.request

# Download zip file of pizza_steak images
# (urllib.request is used because the `!wget` shell magic isn't valid in a plain .py file)
urllib.request.urlretrieve("https://storage.googleapis.com/ztm_tf_course/food_vision/pizza_steak.zip",
                           "pizza_steak.zip")

# Unzip the downloaded file
zip_ref = zipfile.ZipFile("pizza_steak.zip", "r")
zip_ref.extractall()
zip_ref.close()

import os

# Walk through pizza_steak directory and list the number of files
for dirpath, dirnames, filenames in os.walk("pizza_steak"):
    print(f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.")

# Get the class names (programmatically, this is much more helpful with a longer list of classes)
import pathlib
import numpy as np
data_dir = pathlib.Path("pizza_steak/train/")  # turn our training path into a Python path
class_names = np.array(sorted([item.name for item in data_dir.glob('*')]))  # create a list of class names from the subdirectories
print(class_names)
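
# Optional sanity check (an addition to the original script): count the images
# available per class in the training set
for class_name in class_names:
    num_images = len(os.listdir(f"pizza_steak/train/{class_name}"))
    print(f"{class_name}: {num_images} training images")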

# View an image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random

def view_random_image(target_dir, target_class):
    # Setup target directory (we'll view images from here)
    target_folder = target_dir + target_class

    # Get a random image path
    random_image = random.sample(os.listdir(target_folder), 1)

    # Read in the image and plot it using matplotlib
    img = mpimg.imread(target_folder + "/" + random_image[0])
    plt.imshow(img)
    plt.title(target_class)
    plt.axis("off");

    print(f"Image shape: {img.shape}")  # show the shape of the image

    return img

# View a random image from the training dataset
img = view_random_image(target_dir="pizza_steak/train/",
                        target_class="steak")

import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Define training and test directory paths
train_dir = "pizza_steak/train/"
test_dir = "pizza_steak/test/"

# Plot the validation and training data separately
def plot_loss_curves(history):
    """
    Plots separate loss and accuracy curves for training and validation metrics.
    """
    loss = history.history['loss']
    val_loss = history.history['val_loss']

    accuracy = history.history['accuracy']
    val_accuracy = history.history['val_accuracy']

    epochs = range(len(history.history['loss']))

    # Plot loss
    plt.plot(epochs, loss, label='training_loss')
    plt.plot(epochs, val_loss, label='val_loss')
    plt.title('Loss')
    plt.xlabel('Epochs')
    plt.legend()

    # Plot accuracy
    plt.figure()
    plt.plot(epochs, accuracy, label='training_accuracy')
    plt.plot(epochs, val_accuracy, label='val_accuracy')
    plt.title('Accuracy')
    plt.xlabel('Epochs')
    plt.legend();

# Create ImageDataGenerator training instance with data augmentation
train_datagen_augmented = ImageDataGenerator(rescale=1/255.,
                                             rotation_range=20,  # rotate the image slightly between 0 and 20 degrees (note: this is an int not a float)
                                             shear_range=0.2,  # shear the image
                                             zoom_range=0.2,  # zoom into the image
                                             width_shift_range=0.2,  # shift the image width ways
                                             height_shift_range=0.2,  # shift the image height ways
                                             horizontal_flip=True)  # flip the image on the horizontal axis

# Create ImageDataGenerator training instance without data augmentation
train_datagen = ImageDataGenerator(rescale=1/255.)

# Create ImageDataGenerator test instance without data augmentation
test_datagen = ImageDataGenerator(rescale=1/255.)

# Import data and augment it from the training directory
print("Augmented training images:")
train_data_augmented = train_datagen_augmented.flow_from_directory(train_dir,
                                                                   target_size=(224, 224),
                                                                   batch_size=32,
                                                                   class_mode='binary',
                                                                   shuffle=False)  # don't shuffle for demonstration purposes; usually a good thing to shuffle

# Create non-augmented data batches
print("Non-augmented training images:")
train_data = train_datagen.flow_from_directory(train_dir,
                                               target_size=(224, 224),
                                               batch_size=32,
                                               class_mode='binary',
                                               shuffle=False)  # don't shuffle for demonstration purposes

print("Unchanged test images:")
test_data = test_datagen.flow_from_directory(test_dir,
                                             target_size=(224, 224),
                                             batch_size=32,
                                             class_mode='binary')
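
# Optional visual check (an addition to the original script): because both
# training generators above use shuffle=False, their batches line up, so an
# original image can be compared with its augmented counterpart.
images, labels = next(train_data)
augmented_images, augmented_labels = next(train_data_augmented)
i = random.randint(0, images.shape[0] - 1)  # random index within the batch
plt.figure()
plt.imshow(images[i])
plt.title("Original image")
plt.axis(False)
plt.figure()
plt.imshow(augmented_images[i])
plt.title("Augmented image")
plt.axis(False);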

# Import data and augment it from directories (shuffled this time, ready for training)
train_data_augmented_shuffled = train_datagen_augmented.flow_from_directory(train_dir,
                                                                            target_size=(224, 224),
                                                                            batch_size=32,
                                                                            class_mode='binary',
                                                                            shuffle=True)

# Make creating our model a little easier
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Activation
from tensorflow.keras import Sequential

# Create a CNN model (same as Tiny VGG but for binary classification - https://poloclub.github.io/cnn-explainer/ )
model_8 = Sequential([
    Conv2D(10, 3, activation='relu', input_shape=(224, 224, 3)),  # same input shape as our images
    Conv2D(10, 3, activation='relu'),
    MaxPool2D(),
    Conv2D(10, 3, activation='relu'),
    Conv2D(10, 3, activation='relu'),
    MaxPool2D(),
    Flatten(),
    Dense(1, activation='sigmoid')  # binary output: probability of the positive class
])

# Compile the model
model_8.compile(loss="binary_crossentropy",
                optimizer=tf.keras.optimizers.Adam(),
                metrics=["accuracy"])
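
# Inspect the architecture (an addition to the original script): print each
# layer's output shape and parameter count before training
model_8.summary()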

# Fit the model
history_8 = model_8.fit(train_data_augmented_shuffled,
                        epochs=30,
                        steps_per_epoch=len(train_data_augmented_shuffled),
                        validation_data=test_data,
                        validation_steps=len(test_data))
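
# Additions to the original script: visualize training with the
# plot_loss_curves helper defined above (it is otherwise never called),
# then check performance on the test set
plot_loss_curves(history_8)
model_8.evaluate(test_data)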

"""# Making a prediction with our trained model"""

# Classes we're working with
print(class_names)

# Download and view our example image
# (?raw=true makes GitHub serve the raw image bytes rather than the HTML blob page;
# urllib.request again replaces the `!wget` shell magic)
urllib.request.urlretrieve("https://github.com/mrdbourke/tensorflow-deep-learning/blob/ff0a93f68915e85bcb509a0c636d16f4567fbf8a/images/03-steak.jpeg?raw=true",
                           "03-steak.jpeg")
steak = mpimg.imread("03-steak.jpeg")
plt.imshow(steak)
plt.axis(False);

# Check the shape of our image
print(steak.shape)

# Add an extra axis
print(f"Shape before new dimension: {steak.shape}")
steak = tf.expand_dims(steak, axis=0)  # add an extra dimension at axis 0
# steak = steak[tf.newaxis, ...]  # alternative to the above, '...' is short for 'every other dimension'
print(f"Shape after new dimension: {steak.shape}")

# Make a prediction on the custom image tensor
# (resize and rescale first: the model expects 224x224 inputs with values in [0, 1])
steak = tf.image.resize(steak, size=[224, 224]) / 255.
pred = model_8.predict(steak)
print(pred)

# Create a function to resize an image so it can be used with our model
def load_and_prep_image(filename, img_shape=224):
    """
    Takes an image that has already been decoded into an array/tensor
    (e.g. by Gradio or mpimg.imread), reshapes it to
    (img_shape, img_shape, colour_channels) and rescales it to [0, 1].
    """
    # The original file-reading steps are bypassed because Gradio passes in
    # the decoded image directly:
    # img = tf.io.read_file(filename)
    # img = tf.image.decode_image(img, channels=3)
    img = filename

    # Resize the image (to the same size our model was trained on)
    img = tf.image.resize(img, size=[img_shape, img_shape])
    # Rescale the image (get all values between 0 and 1)
    img = img / 255.
    return img
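
# Quick check (an addition to the original script) that the helper behaves as
# expected: any decoded image should come out as a (224, 224, 3) tensor with
# values in [0, 1]
prepped = load_and_prep_image(mpimg.imread("03-steak.jpeg"))
print(prepped.shape, float(tf.reduce_max(prepped)))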

def pred_and_plot(filename):
    """
    Takes an image, makes a prediction on it with a trained model and
    returns the predicted class name (used as the Gradio mapping function).
    """
    # Preprocess the target image
    img = load_and_prep_image(filename)

    # Make a prediction
    pred = model_8.predict(tf.expand_dims(img, axis=0))

    # Get the predicted class (indices follow the sorted class_names order: pizza, steak)
    class_names = ['Pizza🍕', 'Steak🥩']
    pred_class = class_names[int(tf.round(pred)[0][0])]
    return pred_class

# Test our model on a custom image (pred_and_plot expects a decoded image array, not a path)
print(pred_and_plot(mpimg.imread("03-steak.jpeg")))

import cv2
val = cv2.imread("03-steak.jpeg")
print(val.shape)  # note: OpenCV loads images in BGR channel order

from pathlib import Path

# Create a list of example inputs to our Gradio demo
test_data_paths = list(Path(test_dir).glob("*/*.jpg"))
example_list = [[str(filepath)] for filepath in random.sample(test_data_paths, k=3)]
print(example_list)

# Import Gradio, installing it first if it's missing
# (subprocess replaces the `!pip -q install gradio` shell magic)
try:
    import gradio as gr
except ImportError:
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "-m", "pip", "install", "-q", "gradio"])
    import gradio as gr

print(f"Gradio version: {gr.__version__}")

# Create title and description strings
title = "FoodVision Mini 🥩🍕"
description = "A CNN model to classify images of food as pizza or steak."

# Create the Gradio demo
demo = gr.Interface(fn=pred_and_plot,  # mapping function from input to output
                    inputs=["image"],  # a single image input
                    outputs=["text"],  # our fn returns a single class name, so one text output
                    examples=example_list,
                    title=title,
                    description=description)

# Launch the demo!
demo.launch(inline=True)  # inline=True embeds the demo in a notebook; pass share=True for a publicly shareable URL