syedssh committed on
Commit 46b8d74
1 Parent(s): 1b5eb72
Files changed (1)
  1. app.py +9 -9
app.py CHANGED
@@ -1,4 +1,4 @@
-### 1. Imports and class names setup ###
+### 1. Imports and class names setup ###
 import gradio as gr
 import os
 import torch
@@ -33,23 +33,23 @@ def predict(img) -> Tuple[Dict, float]:
     """
     # Start the timer
     start_time = timer()
-
+
     # Transform the target image and add a batch dimension
     img = effnetb2_transforms(img).unsqueeze(0)
-
+
     # Put model into evaluation mode and turn on inference mode
     effnetb2.eval()
     with torch.inference_mode():
         # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
         pred_probs = torch.softmax(effnetb2(img), dim=1)
-
+
     # Create a prediction label and prediction probability dictionary for each prediction class (this is the required format for Gradio's output parameter)
     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
-
+
     # Calculate the prediction time
     pred_time = round(timer() - start_time, 5)
-
-    # Return the prediction dictionary and prediction time
+
+    # Return the prediction dictionary and prediction time
     return pred_labels_and_probs, pred_time
 
 ### 4. Gradio app ###
@@ -68,10 +68,10 @@ demo = gr.Interface(fn=predict, # mapping function from input to output
                     outputs=[gr.Label(num_top_classes=3, label="Predictions"), # what are the outputs?
                              gr.Number(label="Prediction time (s)")], # our fn has two outputs, therefore we have two outputs
                     # Create examples list from "examples/" directory
-                    examples=example_list,
+                    examples=example_list,
                     title=title,
                     description=description,
                     article=article)
 
 # Launch the demo!
-demo.launch()
+demo.launch()
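
Note: the Interface call passes examples=example_list, but this commit does not show how that list is built. A minimal sketch, assuming the example images simply sit as files in the "examples/" directory (not confirmed by this diff); it uses only the os import already present in app.py:

import os

# Hypothetical sketch: wrap each file under "examples/" in a one-element list,
# since gr.Interface accepts `examples` as a list of per-input example lists.
example_list = [["examples/" + example] for example in os.listdir("examples")]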