winwithakash committed
Commit
8add151
1 Parent(s): 282796a
Files changed (10)
  1. FetchRecipe.py +16 -0
  2. Procfile.txt +1 -0
  3. README.md +31 -5
  4. RecipeData.py +69 -0
  5. app.py +137 -0
  6. efficientnet_b0.pt +3 -0
  7. gitattributes.txt +34 -0
  8. helper_functions.py +288 -0
  9. requirements.txt +9 -0
  10. utils.py +140 -0
FetchRecipe.py ADDED
@@ -0,0 +1,16 @@
+ import requests
+ import json
+
+ url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/search"
+
+ querystring = {"query": "chicken soup"}
+
+ headers = {
+     'x-rapidapi-host': "spoonacular-recipe-food-nutrition-v1.p.rapidapi.com",
+     'x-rapidapi-key': "1f9b61c859214d3ab6a00a6d82ec5a85"
+ }
+
+ response = requests.request("GET", url, headers=headers, params=querystring)
+ json_data = json.loads(response.text)
+
+ print(json_data)
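As committed, this script hardcoded its API key in source and pointed at the RapidAPI marketplace page rather than an API host. Below is a minimal sketch of the same lookup against Spoonacular's direct API (the endpoint RecipeData.py uses), reading the key from an environment variable; the `SPOONACULAR_API_KEY` name is an assumption, not part of this commit:

```python
# Sketch: same search, with the key kept out of source control.
# SPOONACULAR_API_KEY is an assumed environment variable name.
import os
import requests

api_key = os.environ["SPOONACULAR_API_KEY"]
url = "https://api.spoonacular.com/recipes/search"  # direct API, as in RecipeData.py

response = requests.get(url, params={"query": "chicken soup", "apiKey": api_key}, timeout=10)
response.raise_for_status()  # fail loudly on HTTP errors instead of parsing bad JSON
print(response.json())
```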
Procfile.txt ADDED
@@ -0,0 +1 @@
+ web: sh setup.sh && streamlit run app.py
README.md CHANGED
@@ -1,12 +1,38 @@
  ---
- title: Seefood Know Your Receipe
- emoji: 😻
- colorFrom: green
+ title: SeeFood
+ emoji: 🐨
+ colorFrom: yellow
  colorTo: pink
  sdk: streamlit
- sdk_version: 1.15.2
+ sdk_version: 1.10.0
  app_file: app.py
  pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Configuration
+
+ `title`: _string_
+ Display title for the Space
+
+ `emoji`: _string_
+ Space emoji (emoji-only character allowed)
+
+ `colorFrom`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `colorTo`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `sdk`: _string_
+ Can be either `gradio` or `streamlit`
+
+ `sdk_version`: _string_
+ Only applicable for `streamlit` SDK.
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+ `app_file`: _string_
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+ Path is relative to the root of the repository.
+
+ `pinned`: _boolean_
+ Whether the Space stays on top of your list.
RecipeData.py ADDED
@@ -0,0 +1,69 @@
+ import requests
+ import json
+ import random
+
+ API_KEY = '1f9b61c859214d3ab6a00a6d82ec5a85'
+
+ def fetchRecipeData(foodName, apiKey=API_KEY):
+     recipe = {}
+
+     # Fetch recipes matching the food name
+     url = f"https://api.spoonacular.com/recipes/search?query={foodName}&apiKey={apiKey}"
+     response = requests.get(url)
+     json_data = response.json()
+
+     # Save the response status code
+     response_status_code = response.status_code
+
+     # Select a random recipe from the fetched recipes
+     recipe_list = json_data['results']
+     foodRecipe = random.choice(recipe_list)
+
+     recipe_ID = foodRecipe['id']
+
+     # Get full recipe details from the API using the recipe id
+     url = f"https://api.spoonacular.com/recipes/{recipe_ID}/information?apiKey={apiKey}&includeNutrition=true"
+     recipe_response = requests.get(url)
+     all_recipe_json_data = recipe_response.json()
+
+     # Recipe instructions
+     recipe_instructions = preprocessing_instructions(all_recipe_json_data['instructions'])
+
+     # Recipe summary
+     recipe_summary = all_recipe_json_data['summary']
+
+     # Recipe ingredients
+     recipe_ingredients = all_recipe_json_data['extendedIngredients']
+     for i, item in enumerate(recipe_ingredients):
+         recipe_ingredients[i] = item['originalName']
+     ingredients_str = ', '.join(recipe_ingredients)  # joined form, not used below
+
+     # Caloric breakdown of the recipe
+     recipe_caloric_breakdown = all_recipe_json_data['nutrition']['caloricBreakdown']
+
+     # Store all values in the recipe dict
+     recipe['id'] = recipe_ID
+     recipe['title'] = foodRecipe['title']
+     recipe['readyTime'] = foodRecipe['readyInMinutes']
+     recipe['sourceUrl'] = foodRecipe['sourceUrl']
+
+     recipe['instructions'] = recipe_instructions
+
+     recipe['ingredients'] = recipe_ingredients
+
+     recipe_summary = recipe_summary.replace('<b>', '')
+     recipe_summary = recipe_summary.replace('</b>', '')
+     recipe['summary'] = recipe_summary
+
+     recipe['percentProtein'] = recipe_caloric_breakdown['percentProtein']
+     recipe['percentFat'] = recipe_caloric_breakdown['percentFat']
+     recipe['percentCarbs'] = recipe_caloric_breakdown['percentCarbs']
+
+     return response_status_code, recipe
+
+
+ def preprocessing_instructions(text):
+     words_to_remove = ['<ol>', '</ol>', '<li>', '</li>']
+     for word in words_to_remove:
+         text = text.replace(word, '')
+     return text
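Note that `fetchRecipeData` assumes the search succeeds and returns at least one result: `random.choice` raises `IndexError` on an empty list, and an error response carries no `results` key. A hedged usage sketch with those guards, where `pizza` is just a sample query:

```python
from RecipeData import fetchRecipeData

try:
    status, recipe = fetchRecipeData("pizza")
except (IndexError, KeyError):
    print("No recipe found (empty results or unexpected response shape)")
else:
    if status == 200:
        print(f"{recipe['title']} ({recipe['readyTime']} min)")
        print("Ingredients:", ", ".join(recipe['ingredients']))
```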
app.py ADDED
@@ -0,0 +1,137 @@
+ import streamlit as st
+ import PIL.Image as Image
+ from utils import make_pred_outside_india
+ from utils import getmodel_outside_india
+ from utils import getmodel_india
+ from utils import load_prepare_img
+ from utils import food_nofood_pred
+ from RecipeData import fetchRecipeData
+
+ IMG_SIZE = (224, 224)
+ model_V2 = 'efficientnet_b0.pt'
+ model_V1 = 'indian_efficientnet_b0.pt'
+
+
+ @st.cache()
+ def model_prediction(model_path, img_file, rescale, selected_location):
+     input_img, device = load_prepare_img(img_file)
+     if selected_location == 'Outside India':
+         model = getmodel_outside_india(model_path)
+     elif selected_location == 'India':
+         model = getmodel_india(model_path)
+     prediction = make_pred_outside_india(input_img, model, device, selected_location)
+     print(prediction)
+     source_code, recipe_data = fetchRecipeData(prediction)
+     return prediction, source_code, recipe_data
+
+ def main():
+     st.set_page_config(
+         page_title="SeeFood - Know Your Recipe",
+         page_icon="🍔",
+         layout="wide",
+         initial_sidebar_state="expanded"
+     )
+
+     st.title('SeeFood🍔')
+     st.write('Upload a food image to get the recipe and other details for that food')
+
+     col1, col2 = st.columns(2)
+
+     with col1:
+         # Image upload widget
+         uploaded_file = st.file_uploader("Choose a file")
+         selected_location = st.selectbox('Select location', ('India', 'Outside India'), index=1)
+         if uploaded_file is not None:
+             display_img = uploaded_file.read()
+             uploaded_img = Image.open(uploaded_file)
+             col2.image(display_img, width=500)
+
+         predict = st.button('Get Recipe!')
+
+     if predict:
+         if uploaded_file is not None:
+             with st.spinner('Checking the image type'):
+                 img_type = food_nofood_pred(uploaded_img)
+                 print(img_type)
+
+             if img_type == 'food':
+                 with st.spinner('Please Wait 👩‍🍳'):
+                     # Select the model and rescaling for the chosen location
+                     if selected_location == 'India':
+                         pred_model = model_V1
+                         pred_rescale = True
+                     if selected_location == 'Outside India':
+                         pred_model = model_V2
+                         pred_rescale = True
+
+                     # Make the prediction and fetch the food recipe from the API
+                     food, source_code, recipe_data = model_prediction(pred_model, uploaded_img, pred_rescale, selected_location)
+
+                     # Caloric breakdown data
+                     percent_protein = recipe_data['percentProtein']
+                     percent_fat = recipe_data['percentFat']
+                     percent_carbs = recipe_data['percentCarbs']
+
+                     # Food name message
+                     col1.success(f"It's {food}")
+
+                     if source_code == 200:
+                         # Display the food recipe
+                         st.header(recipe_data['title'] + " Recipe")
+
+                         col3, col4 = st.columns(2)
+
+                         with col3:
+                             # Ingredients of the recipe
+                             st.subheader('Ingredients')
+                             for i in recipe_data['ingredients']:
+                                 st.info(f"{i}")
+
+                         with col4:
+                             # Instructions for the recipe
+                             st.subheader('Instructions')
+                             st.info(recipe_data['instructions'])
+
+                         st.subheader('Caloric Breakdown')
+                         st.success(
+                             f"* Protein: {percent_protein}%\n"
+                             f"* Fat: {percent_fat}%\n"
+                             f"* Carbohydrates: {percent_carbs}%"
+                         )
+
+                     else:
+                         st.error('Something went wrong, please try again :(')
+
+             elif img_type == 'not food':
+                 st.warning('This is not a food image, please try again!')
+
+         else:
+             st.warning('Please upload an image')
+
+
+ if __name__ == '__main__':
+     main()
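The app chains three stages: a CLIP food/not-food gate, a location-specific EfficientNet classifier, and a Spoonacular recipe lookup on the predicted label. The same pipeline can be exercised outside Streamlit; a rough sketch, where `sample.jpg` is a placeholder path:

```python
import PIL.Image as Image
from utils import food_nofood_pred, load_prepare_img, getmodel_outside_india, make_pred_outside_india
from RecipeData import fetchRecipeData

img = Image.open("sample.jpg")  # placeholder path
if food_nofood_pred(img) == "food":
    input_img, device = load_prepare_img(img)
    model = getmodel_outside_india("efficientnet_b0.pt")
    food = make_pred_outside_india(input_img, model, device, "Outside India")
    status, recipe = fetchRecipeData(food)
    print(food, "->", recipe.get("title"))
else:
    print("Not a food image")
```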
efficientnet_b0.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb5a1224fdaf0fdda08749bc702f37f4d2ac1d9e95949aa78d5110c3e6ce93c
+ size 16840433
gitattributes.txt ADDED
@@ -0,0 +1,34 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
helper_functions.py ADDED
@@ -0,0 +1,288 @@
+ ### We create a bunch of helpful functions throughout the course.
+ ### Storing them here so they're easily accessible.
+
+ import tensorflow as tf
+
+ # Create a function to import an image and resize it so it can be used with our model
+ def load_and_prep_image(filename, img_shape=224, scale=True):
+     """
+     Reads in an image from filename, turns it into a tensor and reshapes into
+     (224, 224, 3).
+
+     Parameters
+     ----------
+     filename (str): string filename of target image
+     img_shape (int): size to resize target image to, default 224
+     scale (bool): whether to scale pixel values to range(0, 1), default True
+     """
+     # Read in the image
+     img = tf.io.read_file(filename)
+     # Decode it into a tensor
+     img = tf.image.decode_jpeg(img)
+     # Resize the image
+     img = tf.image.resize(img, [img_shape, img_shape])
+     if scale:
+         # Rescale the image (get all values between 0 and 1)
+         return img/255.
+     else:
+         return img
+
+ # Note: The following confusion matrix code is a remix of Scikit-Learn's
+ # plot_confusion_matrix function - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html
+ import itertools
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from sklearn.metrics import confusion_matrix
+
+ # Our function needs a different name to sklearn's plot_confusion_matrix
+ def make_confusion_matrix(y_true, y_pred, classes=None, figsize=(10, 10), text_size=15, norm=False, savefig=False):
+     """Makes a labelled confusion matrix comparing predictions and ground truth labels.
+
+     If classes is passed, confusion matrix will be labelled, if not, integer class values
+     will be used.
+
+     Args:
+         y_true: Array of truth labels (must be same shape as y_pred).
+         y_pred: Array of predicted labels (must be same shape as y_true).
+         classes: Array of class labels (e.g. string form). If `None`, integer labels are used.
+         figsize: Size of output figure (default=(10, 10)).
+         text_size: Size of output figure text (default=15).
+         norm: normalize values or not (default=False).
+         savefig: save confusion matrix to file (default=False).
+
+     Returns:
+         A labelled confusion matrix plot comparing y_true and y_pred.
+
+     Example usage:
+         make_confusion_matrix(y_true=test_labels, # ground truth test labels
+                               y_pred=y_preds, # predicted labels
+                               classes=class_names, # array of class label names
+                               figsize=(15, 15),
+                               text_size=10)
+     """
+     # Create the confusion matrix
+     cm = confusion_matrix(y_true, y_pred)
+     cm_norm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] # normalize it
+     n_classes = cm.shape[0] # find the number of classes we're dealing with
+
+     # Plot the figure and make it pretty
+     fig, ax = plt.subplots(figsize=figsize)
+     cax = ax.matshow(cm, cmap=plt.cm.Blues) # colors will represent how 'correct' a class is, darker == better
+     fig.colorbar(cax)
+
+     # Is there a list of classes?
+     if classes:
+         labels = classes
+     else:
+         labels = np.arange(cm.shape[0])
+
+     # Label the axes
+     ax.set(title="Confusion Matrix",
+            xlabel="Predicted label",
+            ylabel="True label",
+            xticks=np.arange(n_classes), # create enough axis slots for each class
+            yticks=np.arange(n_classes),
+            xticklabels=labels, # axes will be labeled with class names (if they exist) or ints
+            yticklabels=labels)
+
+     # Make x-axis labels appear on bottom
+     ax.xaxis.set_label_position("bottom")
+     ax.xaxis.tick_bottom()
+
+     # Set the threshold for different colors
+     threshold = (cm.max() + cm.min()) / 2.
+
+     # Plot the text on each cell
+     for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
+         if norm:
+             plt.text(j, i, f"{cm[i, j]} ({cm_norm[i, j]*100:.1f}%)",
+                      horizontalalignment="center",
+                      color="white" if cm[i, j] > threshold else "black",
+                      size=text_size)
+         else:
+             plt.text(j, i, f"{cm[i, j]}",
+                      horizontalalignment="center",
+                      color="white" if cm[i, j] > threshold else "black",
+                      size=text_size)
+
+     # Save the figure to the current working directory
+     if savefig:
+         fig.savefig("confusion_matrix.png")
+
+ # Make a function to predict on images and plot them (works with multi-class)
+ def pred_and_plot(model, filename, class_names):
+     """
+     Imports an image located at filename, makes a prediction on it with
+     a trained model and plots the image with the predicted class as the title.
+     """
+     # Import the target image and preprocess it
+     img = load_and_prep_image(filename)
+
+     # Make a prediction
+     pred = model.predict(tf.expand_dims(img, axis=0))
+
+     # Get the predicted class
+     if len(pred[0]) > 1: # check for multi-class
+         pred_class = class_names[pred.argmax()] # if more than one output, take the max
+     else:
+         pred_class = class_names[int(tf.round(pred)[0][0])] # if only one output, round
+
+     # Plot the image and predicted class
+     plt.imshow(img)
+     plt.title(f"Prediction: {pred_class}")
+     plt.axis(False);
+
+ import datetime
+
+ def create_tensorboard_callback(dir_name, experiment_name):
+     """
+     Creates a TensorBoard callback instance to store log files.
+
+     Stores log files with the filepath:
+         "dir_name/experiment_name/current_datetime/"
+
+     Args:
+         dir_name: target directory to store TensorBoard log files
+         experiment_name: name of experiment directory (e.g. efficientnet_model_1)
+     """
+     log_dir = dir_name + "/" + experiment_name + "/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+     tensorboard_callback = tf.keras.callbacks.TensorBoard(
+         log_dir=log_dir
+     )
+     print(f"Saving TensorBoard log files to: {log_dir}")
+     return tensorboard_callback
+
+ # Plot the validation and training data separately
+ import matplotlib.pyplot as plt
+
+ def plot_loss_curves(history):
+     """
+     Plots separate loss curves for training and validation metrics.
+
+     Args:
+         history: TensorFlow model History object (see: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/History)
+     """
+     loss = history.history['loss']
+     val_loss = history.history['val_loss']
+
+     accuracy = history.history['accuracy']
+     val_accuracy = history.history['val_accuracy']
+
+     epochs = range(len(history.history['loss']))
+
+     # Plot loss
+     plt.plot(epochs, loss, label='training_loss')
+     plt.plot(epochs, val_loss, label='val_loss')
+     plt.title('Loss')
+     plt.xlabel('Epochs')
+     plt.legend()
+
+     # Plot accuracy
+     plt.figure()
+     plt.plot(epochs, accuracy, label='training_accuracy')
+     plt.plot(epochs, val_accuracy, label='val_accuracy')
+     plt.title('Accuracy')
+     plt.xlabel('Epochs')
+     plt.legend();
+
+ def compare_historys(original_history, new_history, initial_epochs=5):
+     """
+     Compares two TensorFlow model History objects.
+
+     Args:
+         original_history: History object from original model (before new_history)
+         new_history: History object from continued model training (after original_history)
+         initial_epochs: Number of epochs in original_history (new_history plot starts from here)
+     """
+
+     # Get original history measurements
+     acc = original_history.history["accuracy"]
+     loss = original_history.history["loss"]
+
+     val_acc = original_history.history["val_accuracy"]
+     val_loss = original_history.history["val_loss"]
+
+     # Combine original history with new history
+     total_acc = acc + new_history.history["accuracy"]
+     total_loss = loss + new_history.history["loss"]
+
+     total_val_acc = val_acc + new_history.history["val_accuracy"]
+     total_val_loss = val_loss + new_history.history["val_loss"]
+
+     # Make plots
+     plt.figure(figsize=(8, 8))
+     plt.subplot(2, 1, 1)
+     plt.plot(total_acc, label='Training Accuracy')
+     plt.plot(total_val_acc, label='Validation Accuracy')
+     plt.plot([initial_epochs-1, initial_epochs-1],
+              plt.ylim(), label='Start Fine Tuning') # reshift plot around epochs
+     plt.legend(loc='lower right')
+     plt.title('Training and Validation Accuracy')
+
+     plt.subplot(2, 1, 2)
+     plt.plot(total_loss, label='Training Loss')
+     plt.plot(total_val_loss, label='Validation Loss')
+     plt.plot([initial_epochs-1, initial_epochs-1],
+              plt.ylim(), label='Start Fine Tuning') # reshift plot around epochs
+     plt.legend(loc='upper right')
+     plt.title('Training and Validation Loss')
+     plt.xlabel('epoch')
+     plt.show()
+
+ # Create function to unzip a zipfile into current working directory
+ # (since we're going to be downloading and unzipping a few files)
+ import zipfile
+
+ def unzip_data(filename):
+     """
+     Unzips filename into the current working directory.
+
+     Args:
+         filename (str): a filepath to a target zip folder to be unzipped.
+     """
+     zip_ref = zipfile.ZipFile(filename, "r")
+     zip_ref.extractall()
+     zip_ref.close()
+
+ # Walk through an image classification directory and find out how many files (images)
+ # are in each subdirectory.
+ import os
+
+ def walk_through_dir(dir_path):
+     """
+     Walks through dir_path returning its contents.
+
+     Args:
+         dir_path (str): target directory
+
+     Returns:
+         A print out of:
+             number of subdirectories in dir_path
+             number of images (files) in each subdirectory
+             name of each subdirectory
+     """
+     for dirpath, dirnames, filenames in os.walk(dir_path):
+         print(f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.")
+
+ # Function to evaluate: accuracy, precision, recall, f1-score
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+
+ def calculate_results(y_true, y_pred):
+     """
+     Calculates model accuracy, precision, recall and f1 score of a binary classification model.
+
+     Args:
+         y_true: true labels in the form of a 1D array
+         y_pred: predicted labels in the form of a 1D array
+
+     Returns a dictionary of accuracy, precision, recall, f1-score.
+     """
+     # Calculate model accuracy
+     model_accuracy = accuracy_score(y_true, y_pred) * 100
+     # Calculate model precision, recall and f1 score using "weighted" average
+     model_precision, model_recall, model_f1, _ = precision_recall_fscore_support(y_true, y_pred, average="weighted")
+     model_results = {"accuracy": model_accuracy,
+                      "precision": model_precision,
+                      "recall": model_recall,
+                      "f1": model_f1}
+     return model_results
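A quick usage sketch for the evaluation helpers above, using made-up labels:

```python
from helper_functions import calculate_results, make_confusion_matrix

y_true = [0, 1, 1, 0, 1, 0]  # toy ground-truth labels
y_pred = [0, 1, 0, 0, 1, 1]  # toy predictions

print(calculate_results(y_true, y_pred))
# -> {'accuracy': 66.66..., 'precision': ..., 'recall': ..., 'f1': ...}

make_confusion_matrix(y_true, y_pred, classes=["not food", "food"],
                      figsize=(4, 4), text_size=12)
```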
requirements.txt ADDED
@@ -0,0 +1,9 @@
+ streamlit>=1.0.0
+ numpy>=1.9.2
+ pandas>=0.19
+ tensorflow==2.6.0
+ torch>=1.7.0
+ matplotlib>=1.4.3
+ scikit-learn>=0.18
+ timm
+ transformers
utils.py ADDED
@@ -0,0 +1,140 @@
+ import requests
+ import json
+ import torch
+ import timm
+ import torch.nn as nn
+ from torchvision import transforms
+ import torch.nn.functional as F
+ from transformers import CLIPProcessor, CLIPModel
+
+
+ classes_outside_india = ['apple pie', 'baby back ribs', 'baklava', 'beef carpaccio', 'beef tartare',
+                          'beet salad', 'beignets', 'bibimbap', 'bread pudding', 'breakfast burrito',
+                          'bruschetta', 'caesar_salad', 'cannoli', 'caprese salad', 'carrot cake',
+                          'ceviche', 'cheese plate', 'cheesecake', 'chicken curry',
+                          'chicken quesadilla', 'chicken wings', 'chocolate cake', 'chocolate mousse',
+                          'churros', 'clam chowder', 'club sandwich', 'crab cakes', 'creme brulee',
+                          'croque madame', 'cup cakes', 'deviled eggs', 'donuts', 'dumplings', 'edamame',
+                          'eggs benedict', 'escargots', 'falafel', 'filet mignon', 'fish and chips',
+                          'foie gras', 'french fries', 'french onion soup', 'french toast',
+                          'fried calamari', 'fried rice', 'frozen yogurt', 'garlic bread', 'gnocchi',
+                          'greek salad', 'grilled cheese sandwich', 'grilled salmon', 'guacamole',
+                          'gyoza', 'hamburger', 'hot and sour soup', 'hot dog', 'huevos rancheros',
+                          'hummus', 'ice cream', 'lasagna', 'lobster bisque', 'lobster roll sandwich',
+                          'macaroni and cheese', 'macarons', 'miso soup', 'mussels', 'nachos',
+                          'omelette', 'onion rings', 'oysters', 'pad thai', 'paella', 'pancakes',
+                          'panna cotta', 'peking duck', 'pho', 'pizza', 'pork chop', 'poutine',
+                          'prime rib', 'pulled pork sandwich', 'ramen', 'ravioli', 'red velvet cake',
+                          'risotto', 'samosa', 'sashimi', 'scallops', 'seaweed salad',
+                          'shrimp and grits', 'spaghetti bolognese', 'spaghetti carbonara',
+                          'spring rolls', 'steak', 'strawberry_shortcake', 'sushi', 'tacos', 'takoyaki',
+                          'tiramisu', 'tuna tartare', 'waffles']
+
+ classes_india = ['burger', 'butter_naan', 'chai', 'chapati', 'chole_bhature', 'dal_makhani', 'dhokla', 'fried_rice', 'idli',
+                  'jalebi', 'kaathi_rolls', 'kadai_paneer', 'kulfi', 'masala_dosa', 'momos', 'paani_puri', 'pakode', 'pav_bhaji',
+                  'pizza', 'samosa']
+
+
+ def food_nofood_pred(input_image):
+     # Candidate labels for the CLIP model
+     labels = ['food', 'not food']
+
+     # CLIP model for zero-shot food/not-food classification
+     food_nofood_model = CLIPModel.from_pretrained("flax-community/clip-rsicd-v2")
+     processor = CLIPProcessor.from_pretrained("flax-community/clip-rsicd-v2")
+
+     inputs = processor(text=[f"a photo of a {l}" for l in labels], images=input_image, return_tensors="pt", padding=True)
+     outputs = food_nofood_model(**inputs)
+     logits_per_image = outputs.logits_per_image
+     probs = logits_per_image.softmax(dim=1)
+     print(probs)
+     pred = probs.detach().cpu().numpy().argmax(axis=1)
+     pred_class = labels[pred[0]]
+     return pred_class
+
+ def make_pred_outside_india(input_img, model, device, user_location):
+     # Despite the name, this handles both label sets and is used for either location
+     input_img = input_img.unsqueeze(0)
+     model.eval()
+     with torch.no_grad():
+         pred = model(input_img)
+     pred = F.softmax(pred, dim=1).numpy()
+     y_prob = pred.argmax(axis=1)[0]  # index with the highest class probability
+
+     if user_location == 'Outside India':
+         class_label = classes_outside_india[y_prob]
+     elif user_location == 'India':
+         class_label = classes_india[y_prob]
+     return class_label
+
+
+ def getmodel_outside_india(model_path):
+     effnet_b0 = timm.create_model(pretrained=True, model_name='tf_efficientnet_b0')
+
+     for param in effnet_b0.parameters():
+         param.requires_grad = True
+
+     # Replace the classifier head with one sized for our classes
+     effnet_b0.classifier = nn.Linear(1280, len(classes_outside_india))
+
+     # Load the trained weights
+     effnet_b0.load_state_dict(torch.load(model_path, map_location='cpu'))
+     return effnet_b0
+
+
+ def getmodel_india(model_path):
+     effnet_b0 = timm.create_model(pretrained=True, model_name='tf_efficientnet_b0')
+
+     for param in effnet_b0.parameters():
+         param.requires_grad = True
+
+     # Replace the classifier head with one sized for our classes
+     effnet_b0.classifier = nn.Linear(1280, len(classes_india))
+
+     # Load the trained weights
+     effnet_b0.load_state_dict(torch.load(model_path, map_location='cpu'))
+     return effnet_b0
+
+
+ def load_prepare_img(image):
+     # ImageNet normalization statistics
+     normalize = transforms.Normalize(
+         [0.485, 0.456, 0.406],
+         [0.229, 0.224, 0.225]
+     )
+
+     test_transform = transforms.Compose([
+         transforms.Resize((225, 225)),
+         transforms.CenterCrop(224),
+         transforms.ToTensor(),
+         normalize,
+     ])
+     input_img = test_transform(image)
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     return input_img, device
+
+ def fetch_recipe(food_name):
+     url = "https://recipesapi2.p.rapidapi.com/recipes/" + food_name
+     querystring = {"maxRecipes": "1"}
+
+     headers = {
+         'x-rapidapi-host': "recipesapi2.p.rapidapi.com",
+         'x-rapidapi-key': "f6f6823b91msh9e92fed91d5356ap136f5djsn494d8f582fb3"
+     }
+
+     response = requests.request("GET", url, headers=headers, params=querystring)
+     json_data = json.loads(response.text)
+
+     recipe_data = json_data['data'][0]
+
+     return recipe_data
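One loose end in utils.py: `load_prepare_img` returns a `device`, but inference always runs on CPU (the models are loaded with `map_location='cpu'` and never moved). A device-aware variant of the prediction step, as a sketch; `predict_on_device` is a hypothetical helper, not part of this commit:

```python
import torch
import torch.nn.functional as F

def predict_on_device(input_img, model, device, class_names):
    # Hypothetical helper: move model and input to the same device before inference
    model = model.to(device).eval()
    batch = input_img.unsqueeze(0).to(device)  # add the batch dimension
    with torch.no_grad():
        probs = F.softmax(model(batch), dim=1)
    return class_names[probs.argmax(dim=1).item()]
```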