SparshSG committed on
Commit 597251d
1 Parent(s): b811d1e

Upload 22 files

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ FoodVision.hdf5 filter=lfs diff=lfs merge=lfs -text
+ sample_images/sushi.jpg filter=lfs diff=lfs merge=lfs -text
.idea/.gitignore ADDED
@@ -0,0 +1,3 @@
+ # Default ignored files
+ /shelf/
+ /workspace.xml
.idea/.name ADDED
@@ -0,0 +1 @@
+ app.py
.idea/FoodVision.iml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager">
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="jdk" jdkName="Python 3.11" jdkType="Python SDK" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
.idea/inspectionProfiles/profiles_settings.xml ADDED
@@ -0,0 +1,6 @@
+ <component name="InspectionProjectProfileManager">
+   <settings>
+     <option name="USE_PROJECT_PROFILE" value="false" />
+     <version value="1.0" />
+   </settings>
+ </component>
.idea/misc.xml ADDED
@@ -0,0 +1,4 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.11" project-jdk-type="Python SDK" />
+ </project>
.idea/modules.xml ADDED
@@ -0,0 +1,8 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="ProjectModuleManager">
+     <modules>
+       <module fileurl="file://$PROJECT_DIR$/.idea/FoodVision.iml" filepath="$PROJECT_DIR$/.idea/FoodVision.iml" />
+     </modules>
+   </component>
+ </project>
.idea/workspace.xml ADDED
@@ -0,0 +1,39 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <project version="4">
+   <component name="AutoImportSettings">
+     <option name="autoReloadType" value="SELECTIVE" />
+   </component>
+   <component name="ChangeListManager">
+     <list default="true" id="1e69dd8b-dcce-48e7-b98e-c0f62bebe947" name="Changes" comment="" />
+     <option name="SHOW_DIALOG" value="false" />
+     <option name="HIGHLIGHT_CONFLICTS" value="true" />
+     <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
+     <option name="LAST_RESOLUTION" value="IGNORE" />
+   </component>
+   <component name="MarkdownSettingsMigration">
+     <option name="stateVersion" value="1" />
+   </component>
+   <component name="ProjectId" id="2XIBP0z526oyXyOdRYelkE0iL3Y" />
+   <component name="ProjectViewState">
+     <option name="hideEmptyMiddlePackages" value="true" />
+     <option name="showLibraryContents" value="true" />
+   </component>
+   <component name="PropertiesComponent"><![CDATA[{
+   "keyToString": {
+     "RunOnceActivity.OpenProjectViewOnStart": "true",
+     "RunOnceActivity.ShowReadmeOnStart": "true",
+     "settings.editor.selected.configurable": "preferences.pluginManager"
+   }
+ }]]></component>
+   <component name="SpellCheckerSettings" RuntimeDictionaries="0" Folders="0" CustomDictionaries="0" DefaultDictionary="application-level" UseSingleDictionary="true" transferred="true" />
+   <component name="TaskManager">
+     <task active="true" id="Default" summary="Default task">
+       <changelist id="1e69dd8b-dcce-48e7-b98e-c0f62bebe947" name="Changes" comment="" />
+       <created>1698306686202</created>
+       <option name="number" value="Default" />
+       <option name="presentableId" value="Default" />
+       <updated>1698306686202</updated>
+     </task>
+     <servers />
+   </component>
+ </project>
.ipynb_checkpoints/model_training-checkpoint.ipynb ADDED
The diff for this file is too large to render.
 
FoodVision.hdf5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:32acc1c81bcc4513a3d399a8d4cf3b42657884899e6ac72669bd7dbe6eb92521
+ size 81206904
__pycache__/utils.cpython-311.pyc ADDED
Binary file (2.44 kB).
 
helper_functions.py ADDED
@@ -0,0 +1,288 @@
+ ### We create a bunch of helpful functions throughout the course.
+ ### Storing them here so they're easily accessible.
+
+ import tensorflow as tf
+
+ # Create a function to import an image and resize it so it can be used with our model
+ def load_and_prep_image(filename, img_shape=224, scale=True):
+   """
+   Reads in an image from filename, turns it into a tensor and reshapes it to
+   (img_shape, img_shape, 3).
+
+   Parameters
+   ----------
+   filename (str): string filename of target image
+   img_shape (int): size to resize target image to, default 224
+   scale (bool): whether to scale pixel values to range(0, 1), default True
+   """
+   # Read in the image
+   img = tf.io.read_file(filename)
+   # Decode it into a tensor
+   img = tf.image.decode_jpeg(img)
+   # Resize the image
+   img = tf.image.resize(img, [img_shape, img_shape])
+   if scale:
+     # Rescale the image (get all values between 0 and 1)
+     return img/255.
+   else:
+     return img
+
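+ # Example usage (hypothetical path):
+ # img = load_and_prep_image("sample_images/pizza.jpeg", img_shape=224)
+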
+ # Note: The following confusion matrix code is a remix of Scikit-Learn's
+ # plot_confusion_matrix function - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html
+ import itertools
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from sklearn.metrics import confusion_matrix
+
+ # Our function needs a different name to sklearn's plot_confusion_matrix
+ def make_confusion_matrix(y_true, y_pred, classes=None, figsize=(10, 10), text_size=15, norm=False, savefig=False):
+   """Makes a labelled confusion matrix comparing predictions and ground truth labels.
+
+   If classes is passed, the confusion matrix will be labelled; if not, integer class values
+   will be used.
+
+   Args:
+     y_true: Array of truth labels (must be same shape as y_pred).
+     y_pred: Array of predicted labels (must be same shape as y_true).
+     classes: Array of class labels (e.g. string form). If `None`, integer labels are used.
+     figsize: Size of output figure (default=(10, 10)).
+     text_size: Size of output figure text (default=15).
+     norm: normalize values or not (default=False).
+     savefig: save confusion matrix to file (default=False).
+
+   Returns:
+     A labelled confusion matrix plot comparing y_true and y_pred.
+
+   Example usage:
+     make_confusion_matrix(y_true=test_labels, # ground truth test labels
+                           y_pred=y_preds, # predicted labels
+                           classes=class_names, # array of class label names
+                           figsize=(15, 15),
+                           text_size=10)
+   """
+   # Create the confusion matrix
+   cm = confusion_matrix(y_true, y_pred)
+   cm_norm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] # normalize it
+   n_classes = cm.shape[0] # find the number of classes we're dealing with
+
+   # Plot the figure and make it pretty
+   fig, ax = plt.subplots(figsize=figsize)
+   cax = ax.matshow(cm, cmap=plt.cm.Blues) # colors will represent how 'correct' a class is, darker == better
+   fig.colorbar(cax)
+
+   # Is there a list of classes?
+   if classes:
+     labels = classes
+   else:
+     labels = np.arange(cm.shape[0])
+
+   # Label the axes
+   ax.set(title="Confusion Matrix",
+          xlabel="Predicted label",
+          ylabel="True label",
+          xticks=np.arange(n_classes), # create enough axis slots for each class
+          yticks=np.arange(n_classes),
+          xticklabels=labels, # axes will be labeled with class names (if they exist) or ints
+          yticklabels=labels)
+
+   # Make x-axis labels appear on bottom
+   ax.xaxis.set_label_position("bottom")
+   ax.xaxis.tick_bottom()
+
+   # Set the threshold for different colors
+   threshold = (cm.max() + cm.min()) / 2.
+
+   # Plot the text on each cell
+   for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
+     if norm:
+       plt.text(j, i, f"{cm[i, j]} ({cm_norm[i, j]*100:.1f}%)",
+                horizontalalignment="center",
+                color="white" if cm[i, j] > threshold else "black",
+                size=text_size)
+     else:
+       plt.text(j, i, f"{cm[i, j]}",
+                horizontalalignment="center",
+                color="white" if cm[i, j] > threshold else "black",
+                size=text_size)
+
+   # Save the figure to the current working directory
+   if savefig:
+     fig.savefig("confusion_matrix.png")
+
+ # Make a function to predict on images and plot them (works with multi-class)
+ def pred_and_plot(model, filename, class_names):
+   """
+   Imports an image located at filename, makes a prediction on it with
+   a trained model and plots the image with the predicted class as the title.
+   """
+   # Import the target image and preprocess it
+   img = load_and_prep_image(filename)
+
+   # Make a prediction
+   pred = model.predict(tf.expand_dims(img, axis=0))
+
+   # Get the predicted class
+   if len(pred[0]) > 1: # check for multi-class
+     pred_class = class_names[pred.argmax()] # if more than one output, take the max
+   else:
+     pred_class = class_names[int(tf.round(pred)[0][0])] # if only one output, round
+
+   # Plot the image and predicted class
+   plt.imshow(img)
+   plt.title(f"Prediction: {pred_class}")
+   plt.axis(False);
+
+ import datetime
+
+ def create_tensorboard_callback(dir_name, experiment_name):
+   """
+   Creates a TensorBoard callback instance to store log files.
+
+   Stores log files with the filepath:
+     "dir_name/experiment_name/current_datetime/"
+
+   Args:
+     dir_name: target directory to store TensorBoard log files
+     experiment_name: name of experiment directory (e.g. efficientnet_model_1)
+   """
+   log_dir = dir_name + "/" + experiment_name + "/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+   tensorboard_callback = tf.keras.callbacks.TensorBoard(
+       log_dir=log_dir
+   )
+   print(f"Saving TensorBoard log files to: {log_dir}")
+   return tensorboard_callback
+
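+ # Example usage (hypothetical names):
+ # tensorboard_cb = create_tensorboard_callback(dir_name="training_logs",
+ #                                              experiment_name="efficientnet_b1")
+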
+ # Plot the validation and training data separately
+ import matplotlib.pyplot as plt
+
+ def plot_loss_curves(history):
+   """
+   Plots separate loss curves for training and validation metrics.
+
+   Args:
+     history: TensorFlow model History object (see: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/History)
+   """
+   loss = history.history['loss']
+   val_loss = history.history['val_loss']
+
+   accuracy = history.history['accuracy']
+   val_accuracy = history.history['val_accuracy']
+
+   epochs = range(len(history.history['loss']))
+
+   # Plot loss
+   plt.plot(epochs, loss, label='training_loss')
+   plt.plot(epochs, val_loss, label='val_loss')
+   plt.title('Loss')
+   plt.xlabel('Epochs')
+   plt.legend()
+
+   # Plot accuracy
+   plt.figure()
+   plt.plot(epochs, accuracy, label='training_accuracy')
+   plt.plot(epochs, val_accuracy, label='val_accuracy')
+   plt.title('Accuracy')
+   plt.xlabel('Epochs')
+   plt.legend();
+
+ def compare_historys(original_history, new_history, initial_epochs=5):
+   """
+   Compares two TensorFlow model History objects.
+
+   Args:
+     original_history: History object from original model (before new_history)
+     new_history: History object from continued model training (after original_history)
+     initial_epochs: Number of epochs in original_history (new_history plot starts from here)
+   """
+
+   # Get original history measurements
+   acc = original_history.history["accuracy"]
+   loss = original_history.history["loss"]
+
+   val_acc = original_history.history["val_accuracy"]
+   val_loss = original_history.history["val_loss"]
+
+   # Combine original history with new history
+   total_acc = acc + new_history.history["accuracy"]
+   total_loss = loss + new_history.history["loss"]
+
+   total_val_acc = val_acc + new_history.history["val_accuracy"]
+   total_val_loss = val_loss + new_history.history["val_loss"]
+
+   # Make plots
+   plt.figure(figsize=(8, 8))
+   plt.subplot(2, 1, 1)
+   plt.plot(total_acc, label='Training Accuracy')
+   plt.plot(total_val_acc, label='Validation Accuracy')
+   plt.plot([initial_epochs-1, initial_epochs-1],
+            plt.ylim(), label='Start Fine Tuning') # mark where fine-tuning began
+   plt.legend(loc='lower right')
+   plt.title('Training and Validation Accuracy')
+
+   plt.subplot(2, 1, 2)
+   plt.plot(total_loss, label='Training Loss')
+   plt.plot(total_val_loss, label='Validation Loss')
+   plt.plot([initial_epochs-1, initial_epochs-1],
+            plt.ylim(), label='Start Fine Tuning') # mark where fine-tuning began
+   plt.legend(loc='upper right')
+   plt.title('Training and Validation Loss')
+   plt.xlabel('epoch')
+   plt.show()
+
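+ # Example usage (hypothetical History objects from a two-stage run):
+ # compare_historys(feature_extraction_history, fine_tune_history, initial_epochs=5)
+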
+ # Create function to unzip a zipfile into the current working directory
+ # (since we're going to be downloading and unzipping a few files)
+ import zipfile
+
+ def unzip_data(filename):
+   """
+   Unzips filename into the current working directory.
+
+   Args:
+     filename (str): a filepath to a target zip file to be unzipped.
+   """
+   with zipfile.ZipFile(filename, "r") as zip_ref:
+     zip_ref.extractall()
+
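+ # Example usage (hypothetical archive name):
+ # unzip_data("101_food_classes.zip")
+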
+ # Walk through an image classification directory and find out how many files (images)
+ # are in each subdirectory.
+ import os
+
+ def walk_through_dir(dir_path):
+   """
+   Walks through dir_path returning its contents.
+
+   Args:
+     dir_path (str): target directory
+
+   Returns:
+     A print out of:
+       number of subdirectories in dir_path
+       number of images (files) in each subdirectory
+       name of each subdirectory
+   """
+   for dirpath, dirnames, filenames in os.walk(dir_path):
+     print(f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.")
+
+ # Function to evaluate: accuracy, precision, recall, f1-score
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+
+ def calculate_results(y_true, y_pred):
+   """
+   Calculates the accuracy, precision, recall and f1 score of a classification model.
+
+   Args:
+     y_true: true labels in the form of a 1D array
+     y_pred: predicted labels in the form of a 1D array
+
+   Returns a dictionary of accuracy, precision, recall, f1-score.
+   """
+   # Calculate model accuracy
+   model_accuracy = accuracy_score(y_true, y_pred) * 100
+   # Calculate model precision, recall and f1 score using "weighted" average
+   model_precision, model_recall, model_f1, _ = precision_recall_fscore_support(y_true, y_pred, average="weighted")
+   model_results = {"accuracy": model_accuracy,
+                    "precision": model_precision,
+                    "recall": model_recall,
+                    "f1": model_f1}
+   return model_results
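+
+ # Example usage (hypothetical labels):
+ # calculate_results(y_true=[0, 1, 1, 0], y_pred=[0, 1, 0, 0])
+ # -> approx {"accuracy": 75.0, "precision": 0.83, "recall": 0.75, "f1": 0.73}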
model_training.ipynb ADDED
The diff for this file is too large to render.
 
model_training.py ADDED
@@ -0,0 +1,408 @@
+ # -*- coding: utf-8 -*-
+ """model_training.ipynb
+
+ Automatically generated by Colaboratory.
+
+ Original file is located at
+     https://colab.research.google.com/drive/1LgqvdLV1teCsAi6qjR_BBVt4TwX7vx9J
+
+ <a href="https://colab.research.google.com/github/gauravreddy08/food-vision/blob/main/model_training.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
+
+ # **Food Vision** 🍔
+
+ As an introductory project, I built an **end-to-end CNN Image Classification Model** which identifies the food in your image.
+
+ I took a pretrained Image Classification Model that comes with Keras and retrained it on the famous **Food101 Dataset**.
+
+ **Fun Fact:**
+
+ The model actually beats the DeepFood paper's model, which was trained on the same dataset.
+
+ The accuracy of [**DeepFood**](https://arxiv.org/abs/1606.05675) was **77.4%** and our model's is **85%**. A difference of roughly **8 percentage points** isn't much, but the interesting part is that DeepFood's model took 2-3 days to train while ours took around 60 minutes.
+
+ > **Dataset :** `Food101`
+
+ > **Model :** `EfficientNetB1`
+
+ ## **Setting up the Workspace**
+
+ * Checking the GPU
+ * Mounting Google Drive
+ * Importing TensorFlow
+ * Importing other required packages
+
+ ### **Checking the GPU**
+
+ For this project we will be working with **Mixed Precision**, which works best on a GPU with compute capability **7.0+**.
+
+ At the time of writing, Colab offers the following GPUs:
+ * Nvidia K80
+ * **Nvidia T4**
+ * Nvidia P100
+
+ Colab allocates a random GPU every time we factory reset the runtime, so you can keep resetting the runtime until you get a **Tesla T4 GPU**, which has a compute capability of 7.5.
+
+ > If using local hardware, use a GPU with compute capability 7.0+ for best results.
+
+ Run the cell below to see which GPU is allocated to you.
+ """
+
+ !nvidia-smi -L
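+
+ # Optional sanity check (a minimal sketch): TF 2.4+ can report the allocated
+ # GPU's compute capability directly.
+ import tensorflow as tf
+ for gpu in tf.config.list_physical_devices("GPU"):
+     print(tf.config.experimental.get_device_details(gpu).get("compute_capability"))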
+
+ """### **Mounting Google Drive**"""
+
+ from google.colab import drive
+ drive.mount('/content/drive')
+
+ """### **Importing TensorFlow**
+
+ At the time of writing, `tensorflow 2.5.0` has a bug with EfficientNet models. [Click Here](https://github.com/tensorflow/tensorflow/issues/49725) to get more info about the bug. Hopefully TensorFlow fixes it soon.
+
+ So the code below can be used to downgrade to `tensorflow 2.4.1`; it will take a moment to uninstall the previous version and install the required one.
+
+ > You need to restart the **Runtime** after the required version of TensorFlow is installed.
+
+ **Note:** Restarting the runtime won't assign you a new GPU.
+ """
+
+ #!pip install tensorflow==2.4.1
+ import tensorflow as tf
+ print(tf.__version__)
+
+ """### **Importing other required Packages**"""
+
+ import pandas as pd
+ import numpy as np
+ import matplotlib.pyplot as plt
+ import datetime
+ import os
+ import tensorflow_datasets as tfds
+ import seaborn as sn
+
+ """#### **Importing `helper_functions`**
+
+ `helper_functions.py` is a Python script I created, containing functions I use frequently while building deep learning models.
+ """
+
+ !wget https://raw.githubusercontent.com/sg-sparsh-goyal/extras/main/helper_function.py
+
+ from helper_function import plot_loss_curves, load_and_prep_image
+
+ """## **Getting the Data Ready**
+
+ The dataset used is **Food101**, which is available on both Kaggle and TensorFlow.
+
+ In the cells below we will be importing the dataset from the `TensorFlow Datasets` module.
+ """
+
+ # Prints a list of datasets available in the TensorFlow Datasets module
+
+ dataset_list = tfds.list_builders()
+ dataset_list[:10]
+
+ """### **Importing the Food101 Dataset**
+
+ **Disclaimer:**
+ The cell below will take time to run, as it will be downloading
+ **4.65GB of data** from the **TensorFlow Datasets module**.
+
+ So do check that you have enough **disk space** and **bandwidth** to run it.
+ """
+
+ (train_data, test_data), ds_info = tfds.load(name='food101',
+                                              split=['train', 'validation'],
+                                              shuffle_files=False,
+                                              as_supervised=True,
+                                              with_info=True)
+
+ """## **Becoming One with the Data**
+
+ One of the most important steps in building any ML or DL model is to **become one with the data**.
+
+ Once you get the gist of what type of data you're dealing with and how it is structured, everything else will fall into place.
+ """
+
+ ds_info.features
+
+ class_names = ds_info.features['label'].names
+ class_names[:10]
+
+ train_one_sample = train_data.take(1)
+
+ train_one_sample
+
+ for image, label in train_one_sample:
+     print(f"""
+     Image Shape : {image.shape}
+     Image Datatype : {image.dtype}
+     Class : {class_names[label.numpy()]}
+     """)
+
+ image[:2]
+
+ tf.reduce_min(image), tf.reduce_max(image)
+
+ plt.imshow(image)
+ plt.title(class_names[label.numpy()])
+ plt.axis(False);
+
+ """## **Preprocessing the Data**
+
+ Since we've downloaded the data from TensorFlow Datasets, there are a couple of preprocessing steps we have to take before it's ready to model.
+
+ More specifically, our data is currently:
+
+ * In `uint8` data type
+ * Comprised of all different sized tensors (different sized images)
+ * Not scaled (the pixel values are between 0 & 255)
+
+ Whereas, models like data to be:
+
+ * In `float32` data type
+ * Have all of the same size tensors (batches require all tensors have the same shape, e.g. `(224, 224, 3)`)
+ * Scaled (values between 0 & 1), also called normalized
+
+ To take care of these, we'll create a `preprocess_img()` function which:
+
+ * Resizes an input image tensor to a specified size using [`tf.image.resize()`](https://www.tensorflow.org/api_docs/python/tf/image/resize)
+ * Converts an input image tensor's current datatype to `tf.float32` using [`tf.cast()`](https://www.tensorflow.org/api_docs/python/tf/cast)
+ """
+
+ def preprocess_img(image, label, img_size=224):
+     image = tf.image.resize(image, [img_size, img_size])
+     image = tf.cast(image, tf.float32)  # cast to float32; the mixed-precision policy handles float16 compute
+     return image, label
+
+ # Trying the preprocess function on a single image
+
+ preprocessed_img = preprocess_img(image, label)[0]
+ preprocessed_img
+
+ train_data = train_data.map(preprocess_img, num_parallel_calls=tf.data.AUTOTUNE)
+ train_data = train_data.shuffle(buffer_size=1000).batch(32).prefetch(tf.data.AUTOTUNE)
+
+ test_data = test_data.map(preprocess_img, num_parallel_calls=tf.data.AUTOTUNE)
+ test_data = test_data.batch(32)
+
+ train_data
+
+ test_data
+
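+ # A quick sanity check (optional sketch): the batched pipelines should now
+ # report (None, 224, 224, 3) float32 image batches.
+ print(train_data.element_spec)
+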
+ """## **Building the Model : EfficientNetB1**
+
+ ### **Getting the Callbacks ready**
+ As we are dealing with a complex neural network (EfficientNetB1), it's good practice to have a few callbacks set up. The callbacks I will be using throughout this notebook are:
+ * **TensorBoard Callback :** TensorBoard provides the visualization and tooling needed for machine learning experimentation.
+
+ * **EarlyStopping Callback :** Used to stop training when a monitored metric has stopped improving.
+
+ * **ReduceLROnPlateau :** Reduces the learning rate when a metric has stopped improving.
+
+ We already have the **TensorBoard callback** function set up in our helper functions script; all we have to do is get the other callbacks ready.
+ """
+
+ from helper_function import create_tensorboard_callback
+
+ # EarlyStopping Callback
+
+ early_stopping_callback = tf.keras.callbacks.EarlyStopping(restore_best_weights=True, patience=3, verbose=1, monitor="val_accuracy")
+
+ # ReduceLROnPlateau Callback
+
+ lower_lr = tf.keras.callbacks.ReduceLROnPlateau(factor=0.2,
+                                                 monitor='val_accuracy',
+                                                 min_lr=1e-7,
+                                                 patience=0,
+                                                 verbose=1)
+
+ """
+
+ ### **Mixed Precision Training**
+ Mixed precision is used for training neural networks, reducing training time and memory requirements without affecting model performance.
+
+ More specifically, with **Mixed Precision** we will be setting the global dtype policy to `mixed_float16`, because modern accelerators can run operations faster in 16-bit dtypes: they have specialized hardware for 16-bit computations, and 16-bit dtypes can be read from memory faster.
+
+ To learn more about Mixed Precision, [**click here**](https://www.tensorflow.org/guide/mixed_precision)"""
+
+ from tensorflow.keras import mixed_precision
+ mixed_precision.set_global_policy(policy='mixed_float16')
+
+ mixed_precision.global_policy()
+
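+ # Under the mixed_float16 policy, layers compute in float16 but keep float32
+ # variables; a quick check (optional sketch):
+ print(mixed_precision.global_policy().compute_dtype,    # float16
+       mixed_precision.global_policy().variable_dtype)   # float32
+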
+ """
+
+ ### **Building the Model**"""
+
+ from tensorflow.keras import layers
+
+ # Create base model
+ input_shape = (224, 224, 3)
+ base_model = tf.keras.applications.EfficientNetB1(include_top=False)
+
+ # Input layer
+ inputs = layers.Input(shape=input_shape, name="input_layer")
+ x = base_model(inputs)
+
+ x = layers.GlobalAveragePooling2D(name="pooling_layer")(x)
+ x = layers.Dropout(.3)(x)
+
+ x = layers.Dense(len(class_names))(x)
+ # Keep the final softmax in float32 for numerical stability under mixed precision
+ outputs = layers.Activation("softmax", dtype=tf.float32)(x)
+ model = tf.keras.Model(inputs, outputs)
+
+ # Compiling the model
+ model.compile(loss="sparse_categorical_crossentropy",
+               optimizer=tf.keras.optimizers.Adam(0.001),
+               metrics=["accuracy"])
+
+ model.summary()
+
+ history = model.fit(train_data,
+                     epochs=50,
+                     steps_per_epoch=len(train_data),
+                     validation_data=test_data,
+                     validation_steps=int(0.15 * len(test_data)),
+                     callbacks=[create_tensorboard_callback("training-logs", "EfficientNetB1-"),
+                                early_stopping_callback,
+                                lower_lr])
+
+ # Saving the model to Google Drive
+ model.save("/content/drive/My Drive/FinalModel.hdf5")
+
+ # Saving a local copy of the model
+ model.save("FoodVision.hdf5")
+
+ plot_loss_curves(history)
+
+ model.evaluate(test_data)
+
+ """## **Evaluating our Model**"""
+
+ # Commented out IPython magic to ensure Python compatibility.
+ # %load_ext tensorboard
+ # %tensorboard --logdir training-logs
+
+ pred_probs = model.predict(test_data, verbose=1)
+ len(pred_probs), pred_probs.shape
+
+ pred_classes = pred_probs.argmax(axis=1)
+ pred_classes[:10], len(pred_classes), pred_classes.shape
+
+ # Getting the true labels for test_data
+
+ y_labels = []
+ for images, labels in test_data.unbatch():
+     y_labels.append(labels.numpy())
+ y_labels[:10]
+
+ # Predicted labels vs. true labels
+ pred_classes == y_labels
+
311
+ """### **Sklearn's Accuracy Score**"""
312
+
313
+ from sklearn.metrics import accuracy_score
314
+
315
+ sklearn_acc = accuracy_score(y_labels, pred_classes)
316
+ sklearn_acc
317
+
318
+ """### **Confusion Matrix**
319
+ A confusion matrix is a table that is often used to describe the performance of a classification model (or "classifier") on a set of test data for which the true values are known
320
+ """
321
+
322
+ cm = tf.math.confusion_matrix(y_labels, pred_classes)
323
+
324
+ plt.figure(figsize = (100, 100));
325
+ sn.heatmap(cm, annot=True,
326
+ fmt='',
327
+ cmap='Purples');
328
+
+ """### **Model's Class-wise Accuracy Score**"""
+
+ from sklearn.metrics import classification_report
+ report = classification_report(y_labels, pred_classes, output_dict=True)
+
+ # Create empty dictionary
+ class_f1_scores = {}
+ # Loop through classification report items
+ for k, v in report.items():
+     if k == "accuracy": # stop once we get to the accuracy key
+         break
+     else:
+         # Append class names and f1-scores to the new dictionary
+         class_f1_scores[class_names[int(k)]] = v["f1-score"]
+ class_f1_scores
+
+ report_df = pd.DataFrame(class_f1_scores, index=['f1-scores']).T
+
+ report_df = report_df.sort_values("f1-scores", ascending=True)
+
+ fig, ax = plt.subplots(figsize=(12, 25))
+ scores = ax.barh(range(len(report_df)), report_df["f1-scores"].values)
+ ax.set_yticks(range(len(report_df)))
+ plt.axvline(x=0.85, linestyle='--', color='r')
+ ax.set_yticklabels(report_df.index)  # use the sorted DataFrame's index so labels match the bars
+ ax.set_xlabel("f1-score")
+ ax.set_title("F1-Scores for 101 Different Classes")
+ ax.invert_yaxis(); # reverse the order
+
+ """### **Predicting on our own Custom Images**
+
+ Once we have our model ready, it's crucial to evaluate it on custom data: data our model has never seen.
+
+ Training and evaluating a model on train and test data is cool, but making predictions on our own real-world images is another level.
+ """
+
+ import os
+
+ directory_path = "/content/drive/MyDrive/FoodVisionModels/Custom Images"
+ os.makedirs(directory_path, exist_ok=True)
+
+ custom_food_images = [os.path.join(directory_path, img_path) for img_path in os.listdir(directory_path)]
+ custom_food_images
+
+ import matplotlib.pyplot as plt
+
+ def pred_plot_custom(folder_path):
+     custom_food_images = [os.path.join(folder_path, img_path) for img_path in os.listdir(folder_path) if os.path.isfile(os.path.join(folder_path, img_path))]
+
+     for img in custom_food_images:
+         img = load_and_prep_image(img, scale=False)
+         pred_prob = model.predict(tf.expand_dims(img, axis=0))
+         pred_class = class_names[pred_prob.argmax()]
+         top_5_i = (pred_prob.argsort())[0][-5:][::-1]
+         values = pred_prob[0][top_5_i]
+         labels = []
+
+         for x in range(5):
+             labels.append(class_names[top_5_i[x]])
+
+         fig, ax = plt.subplots(1, 2, figsize=(15, 5))
+
+         # Plotting the image
+         ax[0].imshow(img/255.)
+         ax[0].set_title(f"Prediction: {pred_class} Probability: {pred_prob.max():.2f}")
+         ax[0].axis('off')
+
+         # Plotting the model's top 5 predictions
+         ax[1].bar(labels, values, color='orange')
+         ax[1].set_title('Top 5 Predictions')
+
+         plt.show()
+
+ pred_plot_custom("/content/drive/MyDrive/FoodVisionModels/Custom Images/")
+
requirements.txt ADDED
@@ -0,0 +1,8 @@
+ streamlit
+ pandas
+ tensorflow
+ altair
+ numpy
+ matplotlib
+ seaborn
+ tensorflow_datasets
sample_images/1190_pic_main02.jpg ADDED
sample_images/1652733217Grilled20Sirloin20Tri20Tip-a61e7e79a54448e2a68252ea222719c7.jpeg ADDED
sample_images/download.jpeg ADDED
sample_images/ian-dooley-TLD6iCOlyb0-unsplash.jpg ADDED
sample_images/istockphoto-945057664-170667a.jpg ADDED
sample_images/pizza.jpeg ADDED
sample_images/sushi.jpg ADDED

Git LFS Details

  • SHA256: a79c349119932683b9c357653943afe6b60730786f28aa1849491c52329cc211
  • Pointer size: 132 Bytes
  • Size of remote file: 5.31 MB
utils.py ADDED
@@ -0,0 +1,119 @@
+ import datetime
+ import tensorflow as tf
+ import numpy as np
+
+ # The 101 Food101 class names, in label order
+ class_names = ['apple_pie',
+                'baby_back_ribs',
+                'baklava',
+                'beef_carpaccio',
+                'beef_tartare',
+                'beet_salad',
+                'beignets',
+                'bibimbap',
+                'bread_pudding',
+                'breakfast_burrito',
+                'bruschetta',
+                'caesar_salad',
+                'cannoli',
+                'caprese_salad',
+                'carrot_cake',
+                'ceviche',
+                'cheesecake',
+                'cheese_plate',
+                'chicken_curry',
+                'chicken_quesadilla',
+                'chicken_wings',
+                'chocolate_cake',
+                'chocolate_mousse',
+                'churros',
+                'clam_chowder',
+                'club_sandwich',
+                'crab_cakes',
+                'creme_brulee',
+                'croque_madame',
+                'cup_cakes',
+                'deviled_eggs',
+                'donuts',
+                'dumplings',
+                'edamame',
+                'eggs_benedict',
+                'escargots',
+                'falafel',
+                'filet_mignon',
+                'fish_and_chips',
+                'foie_gras',
+                'french_fries',
+                'french_onion_soup',
+                'french_toast',
+                'fried_calamari',
+                'fried_rice',
+                'frozen_yogurt',
+                'garlic_bread',
+                'gnocchi',
+                'greek_salad',
+                'grilled_cheese_sandwich',
+                'grilled_salmon',
+                'guacamole',
+                'gyoza',
+                'hamburger',
+                'hot_and_sour_soup',
+                'hot_dog',
+                'huevos_rancheros',
+                'hummus',
+                'ice_cream',
+                'lasagna',
+                'lobster_bisque',
+                'lobster_roll_sandwich',
+                'macaroni_and_cheese',
+                'macarons',
+                'miso_soup',
+                'mussels',
+                'nachos',
+                'omelette',
+                'onion_rings',
+                'oysters',
+                'pad_thai',
+                'paella',
+                'pancakes',
+                'panna_cotta',
+                'peking_duck',
+                'pho',
+                'pizza',
+                'pork_chop',
+                'poutine',
+                'prime_rib',
+                'pulled_pork_sandwich',
+                'ramen',
+                'ravioli',
+                'red_velvet_cake',
+                'risotto',
+                'samosa',
+                'sashimi',
+                'scallops',
+                'seaweed_salad',
+                'shrimp_and_grits',
+                'spaghetti_bolognese',
+                'spaghetti_carbonara',
+                'spring_rolls',
+                'steak',
+                'strawberry_shortcake',
+                'sushi',
+                'tacos',
+                'takoyaki',
+                'tiramisu',
+                'tuna_tartare',
+                'waffles']
+
+
+ def get_classes():
+     """Returns the list of Food101 class names."""
+     return class_names
+
+ def load_and_prep(image, shape=224, scale=False):
+     """Decodes raw image bytes, resizes to (shape, shape) and optionally rescales to [0, 1]."""
+     image = tf.image.decode_image(image, channels=3)
+     image = tf.image.resize(image, size=[shape, shape])
+     if scale:
+         image = image/255.
+     return image
+
+ def preprocess_data(data):
+     """Converts input data to a float32 NumPy array."""
+     return np.asarray(data).astype(np.float32)
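+
+ # Example usage (hypothetical file; note load_and_prep expects raw bytes,
+ # unlike helper_functions.load_and_prep_image which takes a filename):
+ # with open("sample_images/pizza.jpeg", "rb") as f:
+ #     img = load_and_prep(f.read(), shape=224)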