first commit
Files changed:
- .gitattributes (+1, -0)
- 09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth (+3, -0)
- app.py (+89, -0)
- examples/2582289.jpg (+0, -0)
- examples/3622237.jpg (+0, -0)
- examples/592799.jpg (+0, -0)
- model.py (+40, -0)
.gitattributes
CHANGED
@@ -29,3 +29,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth filter=lfs diff=lfs merge=lfs -text
09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4c01101ba959b24724724487b8c84e45ec9d0cc8b2b1de2f6e2fa7eb02be566dfab
+size 31313869
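The pointer above only records the checkpoint's hash and size; the weights themselves live in LFS storage. A minimal sketch for sanity-checking the downloaded file against the architecture defined in model.py further down (this assumes the checkpoint was saved with `model.state_dict()` and therefore uses torchvision's EfficientNet key names):

```python
import torch

# Load the raw state dict on CPU (illustrative check, not part of the app).
state_dict = torch.load(
    "09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
    map_location="cpu",
)

# The replaced classifier head should map 1408 features -> 3 classes.
print(state_dict["classifier.1.weight"].shape)  # expected: torch.Size([3, 1408])
```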
app.py
ADDED
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+import os
+import torch
+
+import gradio as gr
+
+from model import create_effnetb2_model
+from timeit import default_timer as timer
+from typing import Tuple, Dict
+from PIL import Image
+
+class_names = ["pizza", "steak", "sushi"]
+
+# create effnetb2 model
+effnetb2, effnetb2_transforms = create_effnetb2_model(
+    num_classes=len(class_names),
+)
+
+# load saved weights
+effnetb2.load_state_dict(
+    torch.load(
+        f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
+        map_location=torch.device("cpu"),
+    )
+)
+
+# predict function
+def predict(img: Image) -> Tuple[Dict, float]:
+    """Transforms and performs a prediction on an image and returns the prediction
+    and the time taken
+
+    Parameters
+    ----------
+    img : Image
+        Image to classify
+
+    Returns
+    -------
+    Tuple[Dict, float]
+        tuple with a dictionary that contains the probability that img belongs to
+        each class and the time taken to make the prediction
+
+        Example: ({"class1": 0.95, "class2": 0.02, "class3": 0.03}, 0.026)
+    """
+    start = timer()
+
+    # transform target image and add batch dimension
+    img = effnetb2_transforms(img).unsqueeze(0)
+
+    # put model into eval mode
+    effnetb2.eval()
+    with torch.inference_mode():
+        preds_probs = torch.softmax(effnetb2(img), dim=1)
+
+    # create a prediction label and pred prob dictionary
+    pred_labels_and_probs = {
+        class_names[i]: float(preds_probs[0][i]) for i in range(len(class_names))
+    }
+
+    # get prediction time
+    pred_time = round(timer() - start, 5)
+
+    return pred_labels_and_probs, pred_time
+
+### Gradio app ###
+title = "FoodVision Mini"
+description = "An EfficientNetB2 feature extractor computer vision model to classify \
+images of pizza, steak and sushi"
+article = "test"
+
+# create examples list from "examples" directory
+example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+def main():
+    # create Gradio demo
+    demo = gr.Interface(fn=predict,
+                        inputs=gr.Image(type="pil"),
+                        outputs=[gr.Label(num_top_classes=3, label="Predictions"),
+                                 gr.Number(label="Prediction time (s)")],
+                        examples=example_list,
+                        title=title,
+                        description=description,
+                        article=article)
+
+    # launch demo
+    demo.launch()
+
+if __name__ == '__main__':
+    main()
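A quick way to exercise `predict` outside of Gradio — purely an illustrative sketch, assuming the repo is checked out with the example images above in place — is to call it directly on one of the bundled images:

```python
from PIL import Image

from app import predict  # importing app.py builds the model and loads the weights (it only launches Gradio when run as a script)

# run a single prediction on one of the bundled example images
img = Image.open("examples/2582289.jpg")
pred_labels_and_probs, pred_time = predict(img)

print(pred_labels_and_probs)  # e.g. {"pizza": 0.97, "steak": 0.02, "sushi": 0.01} (values illustrative)
print(f"Prediction time: {pred_time} s")
```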
examples/2582289.jpg
ADDED
examples/3622237.jpg
ADDED
examples/592799.jpg
ADDED
model.py
ADDED
@@ -0,0 +1,40 @@
+import torch
+import torchvision
+
+from torch import nn
+from typing import Tuple
+
+def create_effnetb2_model(
+    num_classes: int = 3,
+    seed: int = 42) -> Tuple[torch.nn.Module, torchvision.transforms.Compose]:
+    """Creates an EfficientNetB2 feature extractor model and transforms
+
+    Parameters
+    ----------
+    num_classes : int, optional
+        Number of classes in the classifier head, by default 3
+    seed : int, optional
+        random seed value, by default 42
+
+    Returns
+    -------
+    Tuple[torch.nn.Module, torchvision.transforms.Compose]
+        Tuple[EffNetB2 feature extractor model, EffNetB2 image transforms]
+    """
+    # Create EffNetB2 pretrained weights, transforms and model
+    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+    transforms = weights.transforms()
+    model = torchvision.models.efficientnet_b2(weights=weights)
+
+    # freeze parameters
+    for param in model.parameters():
+        param.requires_grad = False
+
+    # change classifier head
+    torch.manual_seed(seed)
+    model.classifier = nn.Sequential(
+        nn.Dropout(p=0.3, inplace=True),
+        nn.Linear(in_features=1408, out_features=num_classes)
+    )
+
+    return model, transforms
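For context, a minimal sketch of how `create_effnetb2_model` might be exercised on its own. The 288×288 input size reflects the default EfficientNet_B2 weights' transforms; treat that exact value as an assumption of this sketch:

```python
import torch
from model import create_effnetb2_model

# Build the frozen feature extractor with a 3-class head
model, transforms = create_effnetb2_model(num_classes=3, seed=42)

# Run a dummy forward pass to confirm the output shape matches the classifier head
model.eval()
with torch.inference_mode():
    dummy = torch.randn(1, 3, 288, 288)  # one RGB image, sized per the default transforms
    logits = model(dummy)

print(logits.shape)  # expected: torch.Size([1, 3])
print(transforms)    # the ImageNet preprocessing pipeline bundled with the weights
```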