MohammadAliMKH commited on
Commit
2b0f6fa
1 Parent(s): 07e50a7

Upload 10 files

Browse files
app.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
"""Gradio demo: Food101 image classifier UI backed by predict.predict_gradio."""
import gradio as gr

from predict import predict_gradio

title = "Mohammad Ali Food101 Classification🍔"
# Fix: original read "This demo is a related to classification of 101
# different foods" — grammatically broken user-facing text.
description = "This demo is related to the classification of 101 different foods"

# Two output panels: the top-5 probability distribution and the single
# best class, both fed from the (dict, str) tuple predict_gradio returns.
demo = gr.Interface(
    fn=predict_gradio,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=5, label="All Predictions in 5 most value predicted classes"),
        gr.Label(num_top_classes=1, label="Model Predicts Image as a"),
    ],
    # Clickable sample images shipped with the Space.
    examples=['examples/3243342.jpg', 'examples/1652678.jpg', 'examples/1046933.jpg', 'examples/1840999.jpg', 'examples/168855.jpg'],
    title=title,
    description=description,
)

demo.launch()
efficient_model_101.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:968ae5f37d30877631a9ba567f8f428febf42e7df8c39da6702c7e63075e7eb1
3
+ size 31906417
examples/1046933.jpg ADDED
examples/1652678.jpg ADDED
examples/168855.jpg ADDED
examples/1840999.jpg ADDED
examples/3243342.jpg ADDED
model.py ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
"""Load the fine-tuned EfficientNet-B2 Food101 model and its preprocessing transform."""
import torch
import torchvision
from torchvision.models import EfficientNet_B2_Weights, efficientnet_b2

# Pretrained-weights metadata; used here only for its preprocessing pipeline.
efficient_weight = EfficientNet_B2_Weights.DEFAULT

# Resize/crop/normalize transform matching the weights' training recipe.
efficient_transformer = efficient_weight.transforms()

# Fix: map_location="cpu" — the checkpoint was presumably saved on a GPU
# machine, so a plain torch.load() would raise on CPU-only hosts (e.g. a
# CPU Space); mapping at load time also makes the trailing .to("cpu") moot.
# NOTE(review): this unpickles a full nn.Module — acceptable only because
# the checkpoint is our own artifact; prefer state_dict checkpoints.
efficient_model = torch.load("efficient_model_101.pth", map_location="cpu")
predict.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torchvision
3
+ from PIL import Image
4
+
5
+
6
# The 101 Food101 class labels, in the index order of the classifier head's
# output logits (index i of the model output corresponds to entry i here).
FOOD101_CLASS_NAMES = [
    'apple_pie', 'baby_back_ribs', 'baklava', 'beef_carpaccio', 'beef_tartare',
    'beet_salad', 'beignets', 'bibimbap', 'bread_pudding', 'breakfast_burrito',
    'bruschetta', 'caesar_salad', 'cannoli', 'caprese_salad', 'carrot_cake',
    'ceviche', 'cheesecake', 'cheese_plate', 'chicken_curry', 'chicken_quesadilla',
    'chicken_wings', 'chocolate_cake', 'chocolate_mousse', 'churros', 'clam_chowder',
    'club_sandwich', 'crab_cakes', 'creme_brulee', 'croque_madame', 'cup_cakes',
    'deviled_eggs', 'donuts', 'dumplings', 'edamame', 'eggs_benedict',
    'escargots', 'falafel', 'filet_mignon', 'fish_and_chips', 'foie_gras',
    'french_fries', 'french_onion_soup', 'french_toast', 'fried_calamari', 'fried_rice',
    'frozen_yogurt', 'garlic_bread', 'gnocchi', 'greek_salad', 'grilled_cheese_sandwich',
    'grilled_salmon', 'guacamole', 'gyoza', 'hamburger', 'hot_and_sour_soup',
    'hot_dog', 'huevos_rancheros', 'hummus', 'ice_cream', 'lasagna',
    'lobster_bisque', 'lobster_roll_sandwich', 'macaroni_and_cheese', 'macarons', 'miso_soup',
    'mussels', 'nachos', 'omelette', 'onion_rings', 'oysters',
    'pad_thai', 'paella', 'pancakes', 'panna_cotta', 'peking_duck',
    'pho', 'pizza', 'pork_chop', 'poutine', 'prime_rib',
    'pulled_pork_sandwich', 'ramen', 'ravioli', 'red_velvet_cake', 'risotto',
    'samosa', 'sashimi', 'scallops', 'seaweed_salad', 'shrimp_and_grits',
    'spaghetti_bolognese', 'spaghetti_carbonara', 'spring_rolls', 'steak', 'strawberry_shortcake',
    'sushi', 'tacos', 'takoyaki', 'tiramisu', 'tuna_tartare',
    'waffles',
]
108
+
109
def predict_gradio(image: Image.Image):
    """Classify a PIL image into one of the 101 Food101 classes.

    Args:
        image: input photo as a PIL image (any size; the transform resizes it).

    Returns:
        tuple: (dict mapping each class name to its predicted probability,
                name of the top-1 predicted class).
    """
    # Fix: the original annotated `image:PIL.Image`, a NameError because only
    # `Image` is imported from PIL. Also, neither `efficient_model` nor
    # `efficient_transformer` was ever imported into this module, so the
    # original raised NameError on first call — import them here.
    from model import efficient_model, efficient_transformer

    # Preprocess to the normalized tensor layout EfficientNet-B2 expects.
    image = efficient_transformer(image)

    efficient_model.eval()

    # Inference only — no gradients needed; add the batch dimension.
    with torch.no_grad():
        pred = efficient_model(torch.unsqueeze(image, dim=0))

    # Fix: softmax (not sigmoid) is correct for mutually exclusive multiclass
    # logits — it yields probabilities that sum to 1; sigmoid scored each class
    # independently. The argmax/top-k ranking is unchanged by this.
    probs = torch.softmax(pred[0], dim=0)
    prediction_per_labels = {
        FOOD101_CLASS_NAMES[i]: float(probs[i])
        for i in range(len(FOOD101_CLASS_NAMES))
    }

    prediction = FOOD101_CLASS_NAMES[torch.argmax(pred).item()]

    return prediction_per_labels, prediction
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ torch==2.0.1
2
+ torchvision==0.15.2
3
+ gradio==3.37.0
4
+ Pillow==8.4.0