GeorgeImmanuel committed on
Commit
aa6a6fe
1 Parent(s): 49190f7

we have unzipped and moved the files to the right directory

app.py ADDED
@@ -0,0 +1,75 @@
+ # import the essentials
+ import os
+ import torch
+ import torchvision
+ import time
+ import gradio as gr
+ import numpy as np
+ from pathlib import Path
+ from model import create_effnet_b2_model
+ 
+ # load the class names saved alongside the app
+ with open('class_names.txt') as f:
+     class_names = [class_name.strip() for class_name in f.readlines()]
+ 
+ device = 'cuda' if torch.cuda.is_available() else 'cpu'
+ 
+ # create the EffNetB2 model and load it with the state_dict of our trained model
+ effnetb2_model, effnetb2_transform = create_effnet_b2_model(num_classes=len(class_names))
+ effnetb2_model.load_state_dict(torch.load(f='effnetb2_20percent_101classes.pth',
+                                           map_location=device))  # so the weights also load on CPU-only machines
+ 
+ # create the predict function
+ def predict(img):
+     """
+     Args:
+         img: an input image (PIL format)
+ 
+     Returns: predicted class, prediction probability, and time taken to make the prediction
+     """
+     # transform the image and add a batch dimension
+     tr_img = effnetb2_transform(img).unsqueeze(dim=0).to(device)
+ 
+     # make the prediction with EffNetB2
+     model = effnetb2_model.to(device)
+ 
+     # start the timer
+     start_time = time.perf_counter()
+ 
+     model.eval()
+     with torch.inference_mode():
+         pred_logit = model(tr_img)
+         pred_label = torch.argmax(pred_logit, dim=1).cpu().item()
+         pred_prob = torch.max(torch.softmax(pred_logit, dim=1)).cpu().item()
+ 
+     # stop the timer
+     end_time = time.perf_counter()
+     pred_prob = float(np.round(pred_prob, 3))
+     pred_class = class_names[pred_label]
+     time_taken = float(np.round(end_time - start_time, 3))
+ 
+     return pred_class, pred_prob, time_taken
+ 
+ # create the example list from the images shipped with the app
+ example_dir = Path('examples')
+ example_list = [[str(example_dir / filepath)] for filepath in os.listdir(example_dir)]
+ 
+ # create the Gradio interface
+ description = 'A machine learning model that classifies food images into one of 101 Food101 classes'
+ title = 'Image Classifier'
+ 
+ demo = gr.Interface(fn=predict,  # this function maps the inputs to the outputs
+                     inputs=gr.Image(type='pil'),  # pillow image
+                     outputs=[gr.Label(num_top_classes=1, label='Prediction'),
+                              gr.Number(label='prediction probability'),
+                              gr.Number(label='prediction time (s)')],
+                     examples=example_list,
+                     description=description,
+                     title=title)
+ 
+ demo.launch()
class_names.txt ADDED
@@ -0,0 +1,101 @@
+ apple_pie
+ baby_back_ribs
+ baklava
+ beef_carpaccio
+ beef_tartare
+ beet_salad
+ beignets
+ bibimbap
+ bread_pudding
+ breakfast_burrito
+ bruschetta
+ caesar_salad
+ cannoli
+ caprese_salad
+ carrot_cake
+ ceviche
+ cheese_plate
+ cheesecake
+ chicken_curry
+ chicken_quesadilla
+ chicken_wings
+ chocolate_cake
+ chocolate_mousse
+ churros
+ clam_chowder
+ club_sandwich
+ crab_cakes
+ creme_brulee
+ croque_madame
+ cup_cakes
+ deviled_eggs
+ donuts
+ dumplings
+ edamame
+ eggs_benedict
+ escargots
+ falafel
+ filet_mignon
+ fish_and_chips
+ foie_gras
+ french_fries
+ french_onion_soup
+ french_toast
+ fried_calamari
+ fried_rice
+ frozen_yogurt
+ garlic_bread
+ gnocchi
+ greek_salad
+ grilled_cheese_sandwich
+ grilled_salmon
+ guacamole
+ gyoza
+ hamburger
+ hot_and_sour_soup
+ hot_dog
+ huevos_rancheros
+ hummus
+ ice_cream
+ lasagna
+ lobster_bisque
+ lobster_roll_sandwich
+ macaroni_and_cheese
+ macarons
+ miso_soup
+ mussels
+ nachos
+ omelette
+ onion_rings
+ oysters
+ pad_thai
+ paella
+ pancakes
+ panna_cotta
+ peking_duck
+ pho
+ pizza
+ pork_chop
+ poutine
+ prime_rib
+ pulled_pork_sandwich
+ ramen
+ ravioli
+ red_velvet_cake
+ risotto
+ samosa
+ sashimi
+ scallops
+ seaweed_salad
+ shrimp_and_grits
+ spaghetti_bolognese
+ spaghetti_carbonara
+ spring_rolls
+ steak
+ strawberry_shortcake
+ sushi
+ tacos
+ takoyaki
+ tiramisu
+ tuna_tartare
+ waffles
effnetb2_20percent_101classes.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a0556fcf79356e837c2c4bb9531c9afb83154098d9fb8f5bcebd6151ede705f9
+ size 31837882
examples/example1.jpg ADDED
examples/example2.jpg ADDED
examples/example3.jpg ADDED
model.py ADDED
@@ -0,0 +1,23 @@
+ import torch
+ import torchvision
+ 
+ def create_effnet_b2_model(num_classes=101):
+     """
+     Args:
+         num_classes: total number of output classes
+ 
+     Returns: the model and its corresponding model-specific transform
+     """
+     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+     transform = weights.transforms()
+     model = torchvision.models.efficientnet_b2(weights=weights)
+ 
+     # freeze the base parameters so only the new classifier trains
+     for param in model.parameters():
+         param.requires_grad = False
+ 
+     # modify the classifier layer to match the number of classes
+     model.classifier = torch.nn.Sequential(
+         torch.nn.Dropout(p=0.3, inplace=True),
+         torch.nn.Linear(in_features=1408, out_features=num_classes)
+     )
+ 
+     return model, transform
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch==1.12.0
+ torchvision==0.13.0
+ gradio==3.1.4
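
With the files above in place, a quick local sanity check could look like the sketch below. It is illustrative only and not part of the commit: it assumes the committed files (model.py, class_names.txt, effnetb2_20percent_101classes.pth and the examples/ folder) sit in the working directory, that the pinned requirements are installed, and that Pillow is available (torchvision pulls it in).

import torch
from PIL import Image
from model import create_effnet_b2_model

# load the class names the same way app.py does
with open('class_names.txt') as f:
    class_names = [line.strip() for line in f.readlines()]

# rebuild the model and load the committed weights (map_location keeps it CPU-friendly)
model, transform = create_effnet_b2_model(num_classes=len(class_names))
model.load_state_dict(torch.load('effnetb2_20percent_101classes.pth', map_location='cpu'))
model.eval()

# run a single prediction on one of the committed example images
img = transform(Image.open('examples/example1.jpg')).unsqueeze(dim=0)
with torch.inference_mode():
    probs = torch.softmax(model(img), dim=1)
print(class_names[probs.argmax(dim=1).item()], round(probs.max().item(), 3))

Running the committed app itself is simply `python app.py`, which launches the Gradio demo defined in app.py.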