Himgos committed on
Commit 439fb51 • 1 Parent(s): e189b6a

FoodVision Mini files made in a Jupyter Notebook

Files changed (4)
  1. 10_pretrained_effnetb2_20_percent.pth +3 -0
  2. app.py +74 -0
  3. model.py +26 -0
  4. requirements.txt +3 -0
10_pretrained_effnetb2_20_percent.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22c766015cd794095e568aa5b7b92da0f29c28fd693b2fd0d9af2f360cede424
+ size 31287125
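
The .pth file is stored with Git LFS, so the diff only shows the pointer (version, sha256 oid, size), not the 31,287,125-byte weights themselves. A minimal sketch (not part of the commit) for checking which of the two a local checkout actually contains:

# Check whether the checkout has the real weights or only the Git LFS pointer shown above
from pathlib import Path

weights_path = Path("10_pretrained_effnetb2_20_percent.pth")
head = weights_path.read_bytes()[:200]

if head.startswith(b"version https://git-lfs.github.com/spec/v1"):
    print("Only the LFS pointer is present; fetch the real file with Git LFS.")
else:
    print(f"Weights file present: {weights_path.stat().st_size} bytes (expected 31287125).")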
app.py ADDED
@@ -0,0 +1,74 @@
+
+ ### 1. Imports and class names setup ###
+ import torch
+ import torchvision
+ import gradio as gr
+ import os
+
+ from torch import nn
+ from model import create_effnetb2_model
+ from typing import Tuple, Dict
+ from timeit import default_timer as timer
+
+ # Setup class names
+ class_names = ['pizza', 'steak', 'sushi']
+
+
+ ### 2. Model and transforms preparation ###
+ effnetb2, effnetb2_transforms = create_effnetb2_model()
+
+ # Load saved weights
+ effnetb2.load_state_dict(
+     torch.load(
+         f="10_pretrained_effnetb2_20_percent.pth",
+         map_location=torch.device('cpu')  # ensure the weights load on CPU
+     )
+ )
+
+
+ ### 3. Predict function ###
+ def predict(img) -> Tuple[Dict, float]:
+     # Start a timer
+     start_time = timer()
+
+     # Transform the input image for use with EffNetB2
+     transformed_image = effnetb2_transforms(img).unsqueeze(0)  # add a batch dimension
+
+     # Put the model into eval mode and make a prediction
+     effnetb2.eval()
+     with torch.inference_mode():
+         pred_prob = torch.softmax(effnetb2(transformed_image), dim=1)
+
+     # Create a dict of prediction labels and prediction probabilities
+     pred_labels_and_probs = {class_names[i]: float(pred_prob[0][i]) for i in range(len(class_names))}
+
+     # Calculate the prediction time
+     end_time = timer()
+     pred_time = round(end_time - start_time, 4)
+
+     return pred_labels_and_probs, pred_time
+
+
+
+ ### 4. Gradio app ###
+
+ # Create title, description and article
+ title = "FoodVision Mini 🥩🍕🍣"
+ description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
+ article = "Created in 10. PyTorch Model Deployment."
+
+ # Create the examples list
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict,  # maps inputs to outputs
+                     inputs=gr.Image(type="pil"),
+                     outputs=[gr.Label(num_top_classes=3, label="Predictions"),
+                              gr.Number(label="Prediction Time (s)")],
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article)
+
+ demo.launch(debug=False,  # print errors locally?
+             share=True)
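
Because app.py calls demo.launch() at import time, a quick way to sanity-check the model outside of Gradio is to rebuild it from model.py and run one image through the same steps predict() uses. A minimal sketch, assuming the weights file is in the working directory; the example image path is hypothetical:

import torch
from PIL import Image
from model import create_effnetb2_model

class_names = ["pizza", "steak", "sushi"]

# Recreate the architecture and load the fine-tuned weights on CPU
model, transforms = create_effnetb2_model(num_classes=len(class_names))
model.load_state_dict(torch.load("10_pretrained_effnetb2_20_percent.pth",
                                 map_location="cpu"))
model.eval()

# "examples/pizza.jpg" is a placeholder path; any RGB food image works
img = Image.open("examples/pizza.jpg")
with torch.inference_mode():
    probs = torch.softmax(model(transforms(img).unsqueeze(0)), dim=1)[0]

print({name: float(p) for name, p in zip(class_names, probs)})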
model.py ADDED
@@ -0,0 +1,26 @@
+
+ import torch
+ import torchvision
+
+ from torch import nn
+
+ def create_effnetb2_model(num_classes: int=3,
+                           seed: int=42):
+     # 1, 2, 3. Create the EffNetB2 weights, transforms and model
+     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+     transforms = weights.transforms()
+     model = torchvision.models.efficientnet_b2(weights=weights)
+
+     # 4. Freeze all layers in the base model
+     for param in model.parameters():
+         param.requires_grad = False
+
+     # 5. Change the classifier head (seeded for reproducibility)
+     torch.manual_seed(seed)
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=0.3, inplace=True),
+         nn.Linear(in_features=1408,  # EffNetB2's feature dimension
+                   out_features=num_classes)
+     )
+
+     return model, transforms
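
As a quick check of what create_effnetb2_model() returns, the sketch below (not part of the commit) confirms that only the new classifier head is trainable and that the model outputs one logit per class:

import torch
from model import create_effnetb2_model

model, transforms = create_effnetb2_model(num_classes=3, seed=42)

# Only the replaced head should require gradients; the EffNetB2 backbone is frozen
print([name for name, p in model.named_parameters() if p.requires_grad])
# -> ['classifier.1.weight', 'classifier.1.bias']

# A dummy RGB batch produces one logit per class (any reasonable image size works
# thanks to the backbone's adaptive pooling)
model.eval()
with torch.inference_mode():
    print(model(torch.randn(1, 3, 288, 288)).shape)  # torch.Size([1, 3])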
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch==2.0.0
+ torchvision==0.15.0
+ gradio==3.23.0
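
The three dependencies are pinned to exact versions; a small interpreter check (a sketch, not part of the commit) can confirm the environment matches before launching the app:

# Verify the pinned versions from requirements.txt are what's installed
import torch, torchvision, gradio

print(torch.__version__)        # expected to start with 2.0.0
print(torchvision.__version__)  # expected to start with 0.15.0
print(gradio.__version__)       # expected 3.23.0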