Arikkod committed
Commit f4b3c53 • 1 Parent(s): 18d86fe

Update app.py

Files changed (1)
  1. app.py +28 -19
app.py CHANGED
@@ -3,6 +3,7 @@ import os
 import torch
 from model import create_effnetb2_model
 from timeit import default_timer as timer
+from PIL import Image
 from typing import Tuple, Dict
 
 class_names = ['pizza', 'steak', 'sushi']
@@ -14,14 +15,16 @@ effnetb2.load_state_dict(
     )
 )
 
-def predict(img):
+### Prediction function: EffNetB2 ###
+
+def predict(img: Image.Image) -> Tuple[Dict[str, float], float]:
     # Start a timer
     start_time = timer()
-    # Transform the input image for use wit EffNetB2
+    # Transform the input image for use with EffNetB2
     img = effnetb2_transforms(img).unsqueeze(0)
     # Put model into eval mode, make prediction
     effnetb2.eval()
-    with torch.inference_mode():
+    with torch.no_grad():
         pred_probs = torch.softmax(effnetb2(img), dim=1)
     # Create a prediction label and prediction probability dictionary
     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
@@ -32,20 +35,26 @@ def predict(img):
     return pred_labels_and_probs, pred_time
 
 
-title = 'FoodVision Mini 🍕🥩🍣'
+### Gradio app ###
+
+# Create title, description and article strings
+title = "FoodVision Mini 🍕🥩🍣"
 description = 'An [EfficientNetB2 feature extractor](https://pytorch.org/vision/main/models/generated/torchvision.models.efficientnet_b2.html)'
-article = 'Created with Pytorch model deployment'
-example_list = [["./examples/" + file] for file in os.listdir("./examples")]
-
-demo = gr.Interface(fn=predict,
-                    inputs=gr.Image(type='pil'),
-                    outputs=[gr.Label(num_top_classes=3, label='Predictions'),
-                             gr.Number(label='Prediction time (s)')],
-                    examples=example_list,
-                    title=title,
-                    description=description,
-                    article=article
-                    )
-
-demo.launch(debug=False,
-            share=False)
+article = 'Created by Arik Kodenzov with PyTorch model deployment'
+
+# Create examples list from "examples/" directory
+example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+# Create the Gradio demo
+demo = gr.Interface(fn=predict,  # mapping function from input to output
+                    inputs=gr.Image(type="pil"),  # what are the inputs?
+                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),  # what are the outputs?
+                             gr.Number(label="Prediction time (s)")],  # our fn returns two values, so two outputs
+                    # examples list created from "examples/" directory above
+                    examples=example_list,
+                    title=title,
+                    description=description,
+                    article=article)
+
+# Launch the demo!
+demo.launch()
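
Note on the torch.inference_mode() to torch.no_grad() swap above: both context managers disable gradient tracking for the forward pass, so predictions are identical; torch.inference_mode() (added in PyTorch 1.9) is the slightly faster option but is absent from older PyTorch builds, which is the usual motivation for falling back to torch.no_grad(). A minimal, version-tolerant sketch, assuming only the public torch API (run_inference and _no_grad are illustrative names, not part of this repo):

import torch

# Use inference_mode() where available (PyTorch >= 1.9), otherwise fall
# back to the older no_grad() context manager.
_no_grad = getattr(torch, "inference_mode", torch.no_grad)

def run_inference(model: torch.nn.Module, batch: torch.Tensor) -> torch.Tensor:
    model.eval()  # switch off dropout and batch-norm updates
    with _no_grad():
        return torch.softmax(model(batch), dim=1)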
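
For context, the app imports create_effnetb2_model from model.py, a file this commit does not touch. A hypothetical sketch of what it plausibly contains, following the standard torchvision feature-extractor recipe (pretrained weights, frozen base layers, new classifier head); it assumes torchvision >= 0.13 for the weights enum, and the real model.py may differ:

import torch
import torchvision
from torch import nn

def create_effnetb2_model(num_classes: int = 3, seed: int = 42):
    # Pretrained weights plus the image transforms that match them
    weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
    transforms = weights.transforms()
    model = torchvision.models.efficientnet_b2(weights=weights)

    # Freeze the base layers so only the new head is trainable
    for param in model.parameters():
        param.requires_grad = False

    # Replace the classifier head to match the three food classes
    torch.manual_seed(seed)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3, inplace=True),
        nn.Linear(in_features=1408, out_features=num_classes),  # 1408 = EffNetB2 feature width
    )
    return model, transforms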