David Sembowski committed on
Commit 2451e32
1 Parent(s): 4b26c7e

first commit

.gitattributes CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth filter=lfs diff=lfs merge=lfs -text
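
Note: rules like the new one above are usually generated rather than hand-written; running `git lfs track <file>` from the repository root appends the matching filter line to .gitattributes. A minimal sketch of that step (assuming Git LFS is installed; illustrative, not part of the commit):

import subprocess

# Register the checkpoint with Git LFS; this appends a
# "filter=lfs diff=lfs merge=lfs -text" rule for the file to .gitattributes.
subprocess.run(
    ["git", "lfs", "track",
     "09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth"],
    check=True,
)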
09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:37b329ab24c80214f862782cf003468562f4f3eef4b96ebe1296bb096e5e2c36
+size 31313869
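
The file above is a Git LFS pointer, not the checkpoint itself; the oid and size identify the real object, which is fetched at download time. A minimal sketch for verifying a downloaded checkpoint against the pointer (illustrative helper, not part of the repo):

import hashlib
import os

path = "09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth"

# Expected values come from the LFS pointer above
expected_size = 31313869
expected_sha256 = "37b329ab24c80214f862782cf003468562f4f3eef4b96ebe1296bb096e5e2c36"

# Hash the file in chunks to keep memory use low
sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha256.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha256.hexdigest() == expected_sha256, "sha256 mismatch"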
app.py ADDED
@@ -0,0 +1,66 @@
+### 1. Imports and class names setup ###
+import gradio as gr
+import os
+import torch
+from model import create_effnetb2_model
+from timeit import default_timer as timer
+from typing import Tuple, Dict
+
+# Setup class names
+class_names = ["pizza", "steak", "sushi"]
+
+### 2. Model and transforms preparation ###
+effnetb2, effnetb2_transforms = create_effnetb2_model()
+
+# Load the saved weights
+effnetb2.load_state_dict(
+    torch.load("09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
+               map_location=torch.device("cpu"))  # load the model onto the CPU
+)
+
+### 3. Predict function ###
+def predict(img) -> Tuple[Dict, float]:
+    # Start the timer
+    start_time = timer()
+
+    # Transform the input image for use with EffNetB2 and add a batch dimension
+    img = effnetb2_transforms(img).unsqueeze(0)
+
+    # Put the model in eval mode and make a prediction
+    effnetb2.eval()
+    with torch.inference_mode():
+        # Pass the transformed image through the model and turn the
+        # prediction logits into prediction probabilities
+        pred_probs = torch.softmax(effnetb2(img), dim=1)
+
+    # Create a prediction label to prediction probability dictionary
+    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+    # Calculate the prediction time
+    end_time = timer()
+    pred_time = round(end_time - start_time, 4)
+
+    # Return the prediction dictionary and prediction time
+    return pred_labels_and_probs, pred_time
+
+### 4. Gradio app ###
+# Create a list of example images
+example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+# Create title, description and article strings
+title = "FoodVision Mini 🍕🥩🍣"
+description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
+article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
+
+# Create the Gradio demo
+demo = gr.Interface(fn=predict,  # mapping function from input to output
+                    inputs=gr.Image(type="pil"),  # what are the inputs?
+                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),  # what are the outputs?
+                             gr.Number(label="Prediction time (s)")],  # our fn has two outputs, so we list two outputs
+                    examples=example_list,
+                    title=title,
+                    description=description,
+                    article=article)
+
+# Launch the demo!
+demo.launch(debug=False)  # set share=True for a publicly shareable URL
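
For a quick sanity check without launching the Gradio UI, the same prediction steps can be run directly; a minimal sketch, assuming the checkpoint and one of the bundled example images (e.g. examples/592799.jpg) sit next to the script:

import torch
from PIL import Image
from model import create_effnetb2_model

class_names = ["pizza", "steak", "sushi"]

# Rebuild the model and load the trained weights, mirroring app.py
effnetb2, effnetb2_transforms = create_effnetb2_model()
effnetb2.load_state_dict(
    torch.load("09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
               map_location=torch.device("cpu"))
)
effnetb2.eval()

# Single-image prediction on a bundled example
img = effnetb2_transforms(Image.open("examples/592799.jpg")).unsqueeze(0)
with torch.inference_mode():
    pred_probs = torch.softmax(effnetb2(img), dim=1)
print({class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))})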
examples/2582289.jpg ADDED
examples/3622237.jpg ADDED
examples/592799.jpg ADDED
model.py ADDED
@@ -0,0 +1,34 @@
+import torch
+import torchvision
+from torch import nn
+
+def create_effnetb2_model(num_classes: int = 3,
+                          seed: int = 42):
+    """Creates an EfficientNetB2 feature extractor model and transforms.
+
+    Args:
+        num_classes (int, optional): number of classes in the classifier head.
+            Defaults to 3.
+        seed (int, optional): random seed value. Defaults to 42.
+
+    Returns:
+        model (torch.nn.Module): EffNetB2 feature extractor model.
+        transforms (torchvision.transforms): EffNetB2 image transforms.
+    """
+    # 1, 2, 3. Create EffNetB2 pretrained weights, transforms and model
+    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+    transforms = weights.transforms()
+    model = torchvision.models.efficientnet_b2(weights=weights)
+
+    # 4. Freeze all layers in the base model
+    for param in model.parameters():
+        param.requires_grad = False
+
+    # 5. Change the classifier head (with a random seed for reproducibility)
+    torch.manual_seed(seed)
+    model.classifier = nn.Sequential(
+        nn.Dropout(p=0.3, inplace=True),
+        nn.Linear(in_features=1408, out_features=num_classes),
+    )
+
+    return model, transforms
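
Because the base model is frozen and only the classifier head is replaced, exactly two parameter tensors should remain trainable; a minimal sketch to confirm this:

from model import create_effnetb2_model

model, transforms = create_effnetb2_model(num_classes=3, seed=42)

# Only the new classifier head should require gradients after freezing
trainable = [name for name, p in model.named_parameters() if p.requires_grad]
print(trainable)  # expected: ['classifier.1.weight', 'classifier.1.bias']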
requirements.txt ADDED
@@ -0,0 +1,3 @@
+torch==1.13.0
+torchvision==0.14.0
+gradio==3.16.1