Tejasn committed
Commit e74edb5
1 Parent(s): b5ab758

Upload 10 files
FetchRecipe.py ADDED
@@ -0,0 +1,16 @@
+ import requests
+ import json
+
+ url = "https://rapidapi.com/spoonacular/api/recipe-food-nutrition"
+
+ querystring = {"q":"chicken soup"}
+
+ headers = {
+     'x-rapidapi-host': "rapidapi.com/spoonacular/api/",
+     'x-rapidapi-key': "1f9b61c859214d3ab6a00a6d82ec5a85"
+ }
+
+ response = requests.request("GET", url, headers=headers, params=querystring)
+ json_data = json.loads(response.text)
+
+ print(json_data)
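Note that the URL above is the RapidAPI listing page rather than an API host, so this request is unlikely to return recipe JSON as written. A minimal sketch of the same search against the direct Spoonacular endpoint that RecipeData.py (added in this same commit) uses — the key is the one hard-coded above, the query value is assumed:

import requests

API_KEY = "1f9b61c859214d3ab6a00a6d82ec5a85"  # key from the script above
url = "https://api.spoonacular.com/recipes/search"

# Same search, but against the Spoonacular host that RecipeData.py calls
response = requests.get(url, params={"query": "chicken soup", "apiKey": API_KEY})
print(response.status_code, response.json())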
Procfile.txt ADDED
@@ -0,0 +1 @@
+ web: sh setup.sh && streamlit run app.py
README.md CHANGED
@@ -1,12 +1,38 @@
  ---
- title: Foodreceipe
- emoji: 🐠
- colorFrom: pink
- colorTo: yellow
+ title: SeeFood
+ emoji: 🐨
+ colorFrom: yellow
+ colorTo: pink
  sdk: streamlit
- sdk_version: 1.15.2
+ sdk_version: 1.10.0
  app_file: app.py
  pinned: false
  ---

- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Configuration
+
+ `title`: _string_
+ Display title for the Space
+
+ `emoji`: _string_
+ Space emoji (emoji-only character allowed)
+
+ `colorFrom`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `colorTo`: _string_
+ Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray)
+
+ `sdk`: _string_
+ Can be either `gradio` or `streamlit`
+
+ `sdk_version`: _string_
+ Only applicable for `streamlit` SDK.
+ See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions.
+
+ `app_file`: _string_
+ Path to your main application file (which contains either `gradio` or `streamlit` Python code).
+ Path is relative to the root of the repository.
+
+ `pinned`: _boolean_
+ Whether the Space stays on top of your list.
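For reference, the fields documented above are exactly the ones set in this commit's front matter; filled in with this Space's values (reproduced from the diff above) they read:

---
title: SeeFood
emoji: 🐨
colorFrom: yellow
colorTo: pink
sdk: streamlit
sdk_version: 1.10.0
app_file: app.py
pinned: false
---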
RecipeData.py ADDED
@@ -0,0 +1,69 @@
+ import requests
+ import json
+ import random
+
+ API_KEY = '1f9b61c859214d3ab6a00a6d82ec5a85'
+
+ def fetchRecipeData(foodName, apiKey = API_KEY):
+     recipe = {}
+
+     # Fetching recipe details from the food name
+     url = f"https://api.spoonacular.com/recipes/search?query={foodName}&apiKey={apiKey}"
+     response = requests.get(url)
+     json_data = response.json()
+
+     # saving the response status code
+     response_status_code = response.status_code
+
+     # selecting a random recipe from the fetched recipes
+     recipe_list = json_data['results']
+     foodRecipe = random.choice(recipe_list)
+
+     recipe_ID = foodRecipe['id']
+
+     # getting recipe details from the API using the recipe id
+     url = f"https://api.spoonacular.com/recipes/{recipe_ID}/information?apiKey={apiKey}&includeNutrition=true"
+     recipe_response = requests.get(url)
+     all_recipe_json_data = recipe_response.json()
+
+     # recipe instructions
+     recipe_instructions = preprocessing_instructions(all_recipe_json_data['instructions'])
+
+     # recipe summary
+     recipe_summary = all_recipe_json_data['summary']
+
+     # recipe ingredients
+     recipe_Ingredients = all_recipe_json_data['extendedIngredients']
+     for i, dict in enumerate(recipe_Ingredients):
+         recipe_Ingredients[i] = dict['originalName']
+     Ingredients = ', '.join(recipe_Ingredients)
+
+     # caloric breakdown of the recipe
+     recipe_caloric_breakdown = all_recipe_json_data['nutrition']['caloricBreakdown']
+
+     # storing all values in the recipe dict
+     recipe['id'] = recipe_ID
+     recipe['title'] = foodRecipe['title']
+     recipe['readyTime'] = foodRecipe['readyInMinutes']
+     recipe['soureUrl'] = foodRecipe['sourceUrl']
+
+     recipe['instructions'] = recipe_instructions
+
+     recipe['ingridents'] = recipe_Ingredients
+
+     recipe_summary = recipe_summary.replace('<b>', '')
+     recipe_summary = recipe_summary.replace('</b>', '')
+     recipe['summary'] = recipe_summary
+
+     recipe['percentProtein'] = recipe_caloric_breakdown['percentProtein']
+     recipe['percentFat'] = recipe_caloric_breakdown['percentFat']
+     recipe['percentCarbs'] = recipe_caloric_breakdown['percentCarbs']
+
+     return response_status_code, recipe
+
+
+ def preprocessing_instructions(text):
+     word_to_remove = ['<ol>', '</ol>', '<li>', '</li>']
+     for word in word_to_remove:
+         text = text.replace(word, '')
+     return text
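A minimal usage sketch (hypothetical; the calling code, presumably app.py, is not part of this diff) showing the (status_code, recipe) return shape:

status, recipe = fetchRecipeData("paella")
if status == 200:
    print(recipe['title'], '-', recipe['readyTime'], 'minutes')
    print('Ingredients:', ', '.join(recipe['ingridents']))
else:
    print('Spoonacular request failed with status', status)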
efficientnet_b0.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aeb5a1224fdaf0fdda08749bc702f37f4d2ac1d9e95949aa78d5110c3e6ce93c
+ size 16840433
gitattributes.txt ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
helper_functions.py ADDED
@@ -0,0 +1,288 @@
+ ### We create a bunch of helpful functions throughout the course.
+ ### Storing them here so they're easily accessible.
+
+ import tensorflow as tf
+
+ # Create a function to import an image and resize it to be able to be used with our model
+ def load_and_prep_image(filename, img_shape=224, scale=True):
+   """
+   Reads in an image from filename, turns it into a tensor and reshapes into
+   (224, 224, 3).
+
+   Parameters
+   ----------
+   filename (str): string filename of target image
+   img_shape (int): size to resize target image to, default 224
+   scale (bool): whether to scale pixel values to range(0, 1), default True
+   """
+   # Read in the image
+   img = tf.io.read_file(filename)
+   # Decode it into a tensor
+   img = tf.image.decode_jpeg(img)
+   # Resize the image
+   img = tf.image.resize(img, [img_shape, img_shape])
+   if scale:
+     # Rescale the image (get all values between 0 and 1)
+     return img/255.
+   else:
+     return img
+
+ # Note: The following confusion matrix code is a remix of Scikit-Learn's
+ # plot_confusion_matrix function - https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html
+ import itertools
+ import matplotlib.pyplot as plt
+ import numpy as np
+ from sklearn.metrics import confusion_matrix
+
+ # Our function needs a different name to sklearn's plot_confusion_matrix
+ def make_confusion_matrix(y_true, y_pred, classes=None, figsize=(10, 10), text_size=15, norm=False, savefig=False):
+   """Makes a labelled confusion matrix comparing predictions and ground truth labels.
+
+   If classes is passed, confusion matrix will be labelled, if not, integer class values
+   will be used.
+
+   Args:
+     y_true: Array of truth labels (must be same shape as y_pred).
+     y_pred: Array of predicted labels (must be same shape as y_true).
+     classes: Array of class labels (e.g. string form). If `None`, integer labels are used.
+     figsize: Size of output figure (default=(10, 10)).
+     text_size: Size of output figure text (default=15).
+     norm: normalize values or not (default=False).
+     savefig: save confusion matrix to file (default=False).
+
+   Returns:
+     A labelled confusion matrix plot comparing y_true and y_pred.
+
+   Example usage:
+     make_confusion_matrix(y_true=test_labels, # ground truth test labels
+                           y_pred=y_preds, # predicted labels
+                           classes=class_names, # array of class label names
+                           figsize=(15, 15),
+                           text_size=10)
+   """
+   # Create the confusion matrix
+   cm = confusion_matrix(y_true, y_pred)
+   cm_norm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] # normalize it
+   n_classes = cm.shape[0] # find the number of classes we're dealing with
+
+   # Plot the figure and make it pretty
+   fig, ax = plt.subplots(figsize=figsize)
+   cax = ax.matshow(cm, cmap=plt.cm.Blues) # colors will represent how 'correct' a class is, darker == better
+   fig.colorbar(cax)
+
+   # Is there a list of classes?
+   if classes:
+     labels = classes
+   else:
+     labels = np.arange(cm.shape[0])
+
+   # Label the axes
+   ax.set(title="Confusion Matrix",
+          xlabel="Predicted label",
+          ylabel="True label",
+          xticks=np.arange(n_classes), # create enough axis slots for each class
+          yticks=np.arange(n_classes),
+          xticklabels=labels, # axes will be labeled with class names (if they exist) or ints
+          yticklabels=labels)
+
+   # Make x-axis labels appear on bottom
+   ax.xaxis.set_label_position("bottom")
+   ax.xaxis.tick_bottom()
+
+   # Set the threshold for different colors
+   threshold = (cm.max() + cm.min()) / 2.
+
+   # Plot the text on each cell
+   for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
+     if norm:
+       plt.text(j, i, f"{cm[i, j]} ({cm_norm[i, j]*100:.1f}%)",
+                horizontalalignment="center",
+                color="white" if cm[i, j] > threshold else "black",
+                size=text_size)
+     else:
+       plt.text(j, i, f"{cm[i, j]}",
+                horizontalalignment="center",
+                color="white" if cm[i, j] > threshold else "black",
+                size=text_size)
+
+   # Save the figure to the current working directory
+   if savefig:
+     fig.savefig("confusion_matrix.png")
+
+ # Make a function to predict on images and plot them (works with multi-class)
+ def pred_and_plot(model, filename, class_names):
+   """
+   Imports an image located at filename, makes a prediction on it with
+   a trained model and plots the image with the predicted class as the title.
+   """
+   # Import the target image and preprocess it
+   img = load_and_prep_image(filename)
+
+   # Make a prediction
+   pred = model.predict(tf.expand_dims(img, axis=0))
+
+   # Get the predicted class
+   if len(pred[0]) > 1: # check for multi-class
+     pred_class = class_names[pred.argmax()] # if more than one output, take the max
+   else:
+     pred_class = class_names[int(tf.round(pred)[0][0])] # if only one output, round
+
+   # Plot the image and predicted class
+   plt.imshow(img)
+   plt.title(f"Prediction: {pred_class}")
+   plt.axis(False);
+
+ import datetime
+
+ def create_tensorboard_callback(dir_name, experiment_name):
+   """
+   Creates a TensorBoard callback instance to store log files.
+
+   Stores log files with the filepath:
+     "dir_name/experiment_name/current_datetime/"
+
+   Args:
+     dir_name: target directory to store TensorBoard log files
+     experiment_name: name of experiment directory (e.g. efficientnet_model_1)
+   """
+   log_dir = dir_name + "/" + experiment_name + "/" + datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+   tensorboard_callback = tf.keras.callbacks.TensorBoard(
+       log_dir=log_dir
+   )
+   print(f"Saving TensorBoard log files to: {log_dir}")
+   return tensorboard_callback
+
+ # Plot the validation and training data separately
+ import matplotlib.pyplot as plt
+
+ def plot_loss_curves(history):
+   """
+   Returns separate loss curves for training and validation metrics.
+
+   Args:
+     history: TensorFlow model History object (see: https://www.tensorflow.org/api_docs/python/tf/keras/callbacks/History)
+   """
+   loss = history.history['loss']
+   val_loss = history.history['val_loss']
+
+   accuracy = history.history['accuracy']
+   val_accuracy = history.history['val_accuracy']
+
+   epochs = range(len(history.history['loss']))
+
+   # Plot loss
+   plt.plot(epochs, loss, label='training_loss')
+   plt.plot(epochs, val_loss, label='val_loss')
+   plt.title('Loss')
+   plt.xlabel('Epochs')
+   plt.legend()
+
+   # Plot accuracy
+   plt.figure()
+   plt.plot(epochs, accuracy, label='training_accuracy')
+   plt.plot(epochs, val_accuracy, label='val_accuracy')
+   plt.title('Accuracy')
+   plt.xlabel('Epochs')
+   plt.legend();
+
+ def compare_historys(original_history, new_history, initial_epochs=5):
+   """
+   Compares two TensorFlow model History objects.
+
+   Args:
+     original_history: History object from original model (before new_history)
+     new_history: History object from continued model training (after original_history)
+     initial_epochs: Number of epochs in original_history (new_history plot starts from here)
+   """
+
+   # Get original history measurements
+   acc = original_history.history["accuracy"]
+   loss = original_history.history["loss"]
+
+   val_acc = original_history.history["val_accuracy"]
+   val_loss = original_history.history["val_loss"]
+
+   # Combine original history with new history
+   total_acc = acc + new_history.history["accuracy"]
+   total_loss = loss + new_history.history["loss"]
+
+   total_val_acc = val_acc + new_history.history["val_accuracy"]
+   total_val_loss = val_loss + new_history.history["val_loss"]
+
+   # Make plots
+   plt.figure(figsize=(8, 8))
+   plt.subplot(2, 1, 1)
+   plt.plot(total_acc, label='Training Accuracy')
+   plt.plot(total_val_acc, label='Validation Accuracy')
+   plt.plot([initial_epochs-1, initial_epochs-1],
+            plt.ylim(), label='Start Fine Tuning') # reshift plot around epochs
+   plt.legend(loc='lower right')
+   plt.title('Training and Validation Accuracy')
+
+   plt.subplot(2, 1, 2)
+   plt.plot(total_loss, label='Training Loss')
+   plt.plot(total_val_loss, label='Validation Loss')
+   plt.plot([initial_epochs-1, initial_epochs-1],
+            plt.ylim(), label='Start Fine Tuning') # reshift plot around epochs
+   plt.legend(loc='upper right')
+   plt.title('Training and Validation Loss')
+   plt.xlabel('epoch')
+   plt.show()
+
+ # Create function to unzip a zipfile into current working directory
+ # (since we're going to be downloading and unzipping a few files)
+ import zipfile
+
+ def unzip_data(filename):
+   """
+   Unzips filename into the current working directory.
+
+   Args:
+     filename (str): a filepath to a target zip folder to be unzipped.
+   """
+   zip_ref = zipfile.ZipFile(filename, "r")
+   zip_ref.extractall()
+   zip_ref.close()
+
+ # Walk through an image classification directory and find out how many files (images)
+ # are in each subdirectory.
+ import os
+
+ def walk_through_dir(dir_path):
+   """
+   Walks through dir_path returning its contents.
+
+   Args:
+     dir_path (str): target directory
+
+   Returns:
+     A print out of:
+       number of subdirectories in dir_path
+       number of images (files) in each subdirectory
+       name of each subdirectory
+   """
+   for dirpath, dirnames, filenames in os.walk(dir_path):
+     print(f"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.")
+
+ # Function to evaluate: accuracy, precision, recall, f1-score
+ from sklearn.metrics import accuracy_score, precision_recall_fscore_support
+
+ def calculate_results(y_true, y_pred):
+   """
+   Calculates model accuracy, precision, recall and f1 score of a binary classification model.
+
+   Args:
+       y_true: true labels in the form of a 1D array
+       y_pred: predicted labels in the form of a 1D array
+
+   Returns a dictionary of accuracy, precision, recall, f1-score.
+   """
+   # Calculate model accuracy
+   model_accuracy = accuracy_score(y_true, y_pred) * 100
+   # Calculate model precision, recall and f1 score using "weighted" average
+   model_precision, model_recall, model_f1, _ = precision_recall_fscore_support(y_true, y_pred, average="weighted")
+   model_results = {"accuracy": model_accuracy,
+                    "precision": model_precision,
+                    "recall": model_recall,
+                    "f1": model_f1}
+   return model_results
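A quick sketch of calculate_results on made-up labels (the values here are illustrative, not from this repo):

y_true = [0, 1, 1, 0]
y_pred = [0, 1, 0, 0]
print(calculate_results(y_true, y_pred))
# -> {'accuracy': 75.0, 'precision': 0.83..., 'recall': 0.75, 'f1': 0.73...} (approx.)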
indian_efficientnet_b0.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2bf08c828e0305a1965a07295a5edf5580599d41afab0ce160b1a5ea8bcbeef4
+ size 16425393
requirements (1).txt ADDED
@@ -0,0 +1,6 @@
+ streamlit>=1.0.0
+ numpy>=1.9.2
+ pandas>=0.19
+ tensorflow==2.6.0
+ matplotlib>=1.4.3
+ scikit-learn>=0.18
utils.py ADDED
@@ -0,0 +1,122 @@
+ import os
+ import requests
+ import json
+ import torch
+ import timm
+ import torch.nn as nn
+ import torchvision
+ from torchvision import transforms, datasets, models
+ import torch.nn.functional as F
+ import PIL
+ import PIL.Image as Image
+ import numpy as np
+
+
+
+ classes_outside_india = ['apple pie', 'baby back ribs', 'baklava', 'beef carpaccio', 'beef tartare',
+                          'beet salad', 'beignets', 'bibimbap', 'bread pudding', 'breakfast burrito',
+                          'bruschetta', 'caesar_salad', 'cannoli', 'caprese salad', 'carrot cake',
+                          'ceviche', 'cheese plate', 'cheesecake', 'chicken curry',
+                          'chicken quesadilla', 'chicken wings', 'chocolate cake', 'chocolate mousse',
+                          'churros', 'clam chowder', 'club sandwich', 'crab cakes', 'creme brulee',
+                          'croque madame', 'cup cakes', 'deviled eggs', 'donuts', 'dumplings', 'edamame',
+                          'eggs benedict', 'escargots', 'falafel', 'filet mignon', 'fish and chips',
+                          'foie gras', 'french fries', 'french onion soup', 'french toast',
+                          'fried calamari', 'fried rice', 'frozen yogurt', 'garlic bread', 'gnocchi',
+                          'greek salad', 'grilled cheese sandwich', 'grilled salmon', 'guacamole',
+                          'gyoza', 'hamburger', 'hot and sour soup', 'hot dog', 'huevos rancheros',
+                          'hummus', 'ice cream', 'lasagna', 'lobster bisque', 'lobster roll sandwich',
+                          'macaroni and cheese', 'macarons', 'miso soup', 'mussels', 'nachos',
+                          'omelette', 'onion rings', 'oysters', 'pad thai', 'paella', 'pancakes',
+                          'panna cotta', 'peking duck', 'pho', 'pizza', 'pork chop', 'poutine',
+                          'prime rib', 'pulled pork sandwich', 'ramen', 'ravioli', 'red velvet cake',
+                          'risotto', 'samosa', 'sashimi', 'scallops', 'seaweed salad',
+                          'shrimp and grits', 'spaghetti bolognese', 'spaghetti carbonara',
+                          'spring rolls', 'steak', 'strawberry_shortcake', 'sushi', 'tacos', 'takoyaki',
+                          'tiramisu', 'tuna tartare', 'waffles']
+
+ classes_india = ['burger', 'butter_naan', 'chai', 'chapati', 'chole_bhature', 'dal_makhani', 'dhokla', 'fried_rice', 'idli',
+                  'jalebi', 'kaathi_rolls', 'kadai_paneer', 'kulfi', 'masala_dosa', 'momos', 'paani_puri', 'pakode', 'pav_bhaji',
+                  'pizza', 'samosa']
+
+
+ def make_pred_outside_india(input_img, model, device, user_location):
+     input_img = input_img.unsqueeze(0)
+     model.eval()
+     pred = model(input_img)
+     # if torch.cuda.is_available():
+     #     pred = F.softmax(pred).detach().cpu().numpy()
+     #     y_prob = pred.argmax(axis=1)[0]  # return index with highest class probability
+     # else:
+     pred = F.softmax(pred).detach().numpy()
+     y_prob = pred.argmax(axis=1)[0]
+
+     if(user_location=='Outside_India'):
+         class_label = classes_outside_india[y_prob]
+     elif(user_location=='India'):
+         class_label = classes_india[y_prob]
+     return class_label
+
+
+ def getmodel_outside_india(model_path):
+     # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     effnet_b0 = timm.create_model(pretrained=True, model_name='tf_efficientnet_b0')
+
+     for param in effnet_b0.parameters():
+         param.requires_grad = True
+
+     effnet_b0.classifier = nn.Linear(1280, len(classes_outside_india))
+     effnet_b0 = effnet_b0
+
+     # Model Loading
+     effnet_b0.load_state_dict(torch.load(model_path, map_location='cpu'))
+     return effnet_b0
+
+
+ def getmodel_india(model_path):
+     # defining model
+     # device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     effnet_b0 = timm.create_model(pretrained=True, model_name='tf_efficientnet_b0')
+
+     for param in effnet_b0.parameters():
+         param.requires_grad = True
+
+     effnet_b0.classifier = nn.Linear(1280, len(classes_india))
+     effnet_b0 = effnet_b0
+
+     # Model Loading
+     effnet_b0.load_state_dict(torch.load(model_path, map_location='cpu'))
+     return effnet_b0
+
+
+ def load_prepare_img(image):
+     normalize = transforms.Normalize(
+         [0.485, 0.456, 0.406],
+         [0.229, 0.224, 0.225]
+     )
+
+     test_transform = transforms.Compose([
+         transforms.Resize((225, 225)),
+         transforms.CenterCrop(224),
+         transforms.ToTensor(),
+         normalize,
+     ])
+     input_img = test_transform(image)
+     device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+     return input_img, device
+
+ def fetch_recipe(food_name):
+     url = "https://recipesapi2.p.rapidapi.com/recipes/" + food_name
+     querystring = {"maxRecipes": "1"}
+
+     headers = {
+         'x-rapidapi-host': "recipesapi2.p.rapidapi.com",
+         'x-rapidapi-key': "f6f6823b91msh9e92fed91d5356ap136f5djsn494d8f582fb3"
+     }
+
+     response = requests.request("GET", url, headers=headers, params=querystring)
+     json_data = json.loads(response.text)
+
+     recipe_data = json_data['data'][0]
+
+     return recipe_data
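A hedged end-to-end sketch of how these helpers fit together. The actual caller (app.py) is not included in this commit, so the flow, the input filename, and the 'title' key read from the recipes API response are assumptions:

from PIL import Image
from utils import load_prepare_img, getmodel_india, make_pred_outside_india, fetch_recipe

image = Image.open("food_photo.jpg")                        # hypothetical input image
input_img, device = load_prepare_img(image)
model = getmodel_india("indian_efficientnet_b0.pt")         # weights file added in this commit
label = make_pred_outside_india(input_img, model, device, user_location='India')
recipe = fetch_recipe(label)
print(label, '->', recipe.get('title'))                     # 'title' key assumed from the API response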