catperson1208 committed on
Commit
3fef8b8
1 Parent(s): e5e10bb

first_commit

.gitattributes CHANGED
@@ -33,3 +33,5 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.file_extension filter=lfs diff=lfs merge=lfs -text
+ 09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth filter=lfs diff=lfs merge=lfs -text
09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:eff07ee6a9faf1b1cbaf25837bd5990025f46ac083ea629919de57c82a86c157
+ size 31314554
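
The lines above are only the Git LFS pointer; the actual ~31 MB checkpoint is fetched with `git lfs pull`. Once it is downloaded, a quick integrity check (a minimal sketch, assuming the weights file sits in the working directory) should reproduce the `oid` hash:

```python
import hashlib

def sha256_of_file(path: str, chunk_size: int = 1 << 20) -> str:
    """Hash the file in chunks so the checkpoint never has to fit in memory at once."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Should print the oid sha256 recorded in the pointer file above
print(sha256_of_file("09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth"))
```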
app.py ADDED
@@ -0,0 +1,71 @@
+ ### 1. Imports and class names setup ###
+ import gradio as gr
+ import os
+ import torch
+ 
+ from model import create_effnetb2_model
+ from timeit import default_timer as timer
+ from typing import Tuple, Dict
+ 
+ # Setup class names
+ class_names = ["pizza", "steak", "sushi"]
+ 
+ ### 2. Model and transforms preparation ###
+ effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=3)
+ 
+ # Load saved weights
+ effnetb2.load_state_dict(
+     torch.load(
+         f="09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
+         map_location=torch.device("cpu"),  # load the model onto the CPU
+     )
+ )
+ 
+ ### 3. Predict function (predict()) ###
+ def predict(img) -> Tuple[Dict, float]:
+     # Start a timer
+     start_time = timer()
+ 
+     # Transform the input image for use with EffNetB2
+     # (unsqueeze adds a batch dimension on the 0th axis)
+     img = effnetb2_transforms(img).unsqueeze(0)
+ 
+     # Put the model into eval mode and make a prediction
+     effnetb2.eval()
+     with torch.inference_mode():
+         # Pass the transformed image through the model and turn the
+         # prediction logits into probabilities
+         pred_probs = torch.softmax(effnetb2(img), dim=1)
+ 
+     # Create a prediction label to prediction probability dictionary
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+ 
+     # Calculate the prediction time
+     end_time = timer()
+     pred_time = round(end_time - start_time, 4)
+ 
+     # Return the prediction dict and prediction time
+     return pred_labels_and_probs, pred_time
+ 
+ ### 4. Gradio app ###
+ # Create title, description and article strings
+ title = "FoodVision Mini"
+ description = "An [EfficientNetB2 feature extractor](https://pytorch.org/vision/main/models/efficientnet.html) computer vision model to classify images as pizza, steak or sushi."
+ article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."
+ 
+ # Create an example list from the "examples/" directory
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+ 
+ # Create the Gradio demo
+ demo = gr.Interface(
+     fn=predict,  # maps inputs to outputs
+     inputs=gr.Image(type="pil"),
+     outputs=[
+         gr.Label(num_top_classes=3, label="Predictions"),
+         gr.Number(label="Prediction time (s)"),
+     ],
+     examples=example_list,
+     title=title,
+     description=description,
+     article=article,
+ )
+ 
+ # Launch the demo!
+ demo.launch(debug=False)  # debug=True would print errors locally
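
For a quick local smoke test of the prediction path without launching the Gradio UI (importing app.py at top level would also call demo.launch()), a minimal sketch that repeats the same steps, assuming the LFS weights and the example images below have been pulled locally:

```python
import torch
from PIL import Image
from model import create_effnetb2_model

# Rebuild the model and load the saved feature-extractor weights (CPU only)
effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=3)
effnetb2.load_state_dict(
    torch.load(
        "09_pretrained_effnetb2_feature_extractor_pizza_steak_sushi_20_percent.pth",
        map_location=torch.device("cpu"),
    )
)

# Run one example image through the same transform -> model -> softmax pipeline
effnetb2.eval()
with torch.inference_mode():
    img = effnetb2_transforms(Image.open("examples/592799.jpg")).unsqueeze(0)
    pred_probs = torch.softmax(effnetb2(img), dim=1)
print(pred_probs)  # one probability each for pizza, steak, sushi
```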
examples/2582289.jpg ADDED
examples/3622237.jpg ADDED
examples/592799.jpg ADDED
foodvision_mini ADDED
@@ -0,0 +1 @@
+ Subproject commit e5e10bb2ef9727760930f348653752f16daba772
model.py ADDED
@@ -0,0 +1,23 @@
+ import torch
+ import torchvision
+ 
+ from torch import nn
+ 
+ def create_effnetb2_model(num_classes: int = 3,  # default number of output classes
+                           seed: int = 42):
+     # 1. Get the pretrained EffNetB2 weights and their matching transforms
+     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+     transforms = weights.transforms()
+ 
+     # 2. Create the model with the pretrained weights
+     model = torchvision.models.efficientnet_b2(weights=weights)
+ 
+     # 3. Freeze all layers in the base model
+     for param in model.parameters():
+         param.requires_grad = False
+ 
+     # 4. Change the classifier head (with a random seed for reproducibility)
+     torch.manual_seed(seed)
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=0.3, inplace=True),
+         nn.Linear(in_features=1408, out_features=num_classes),
+     )
+ 
+     return model, transforms
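
A small check that the function above behaves as intended, with the base frozen and only the new head trainable (1408 inputs × 3 outputs + 3 biases = 4,227 parameters):

```python
model, transforms = create_effnetb2_model(num_classes=3, seed=42)

trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
print(f"Trainable params: {trainable} / {total}")  # expect 4227 trainable
print(transforms)  # the resize/crop/normalize pipeline matching the pretrained weights
```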
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ torch==1.12.0
+ torchvision==0.13.0
+ gradio==3.1.4
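
To confirm a local environment matches the pinned versions above, a short check (torch may carry a build suffix such as +cpu or +cu116):

```python
import gradio, torch, torchvision

print(torch.__version__)        # expected: 1.12.0 (possibly with a build suffix)
print(torchvision.__version__)  # expected: 0.13.0
print(gradio.__version__)       # expected: 3.1.4
```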