# 1. Imports and class names
import gradio as gr
import os
import torch
from model import create_effnetb2_model
from timeit import default_timer as timer
from typing import Tuple, Dict
# Set up class names
with open("class_names.txt", "r") as f:
    class_names = [food_name.strip() for food_name in f.readlines()]
# 2. Model and transforms preparations
# Create model
effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=101)
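# NOTE: `create_effnetb2_model` lives in model.py (not shown in this file). As a rough,
# assumed sketch, it likely builds a torchvision EfficientNetB2 with pretrained weights,
# freezes the base layers and swaps in a new classifier head, returning (model, transforms):
#
#     def create_effnetb2_model(num_classes: int = 3, seed: int = 42):
#         weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
#         transforms = weights.transforms()
#         model = torchvision.models.efficientnet_b2(weights=weights)
#         for param in model.parameters():
#             param.requires_grad = False  # freeze the feature extractor
#         torch.manual_seed(seed)
#         model.classifier = torch.nn.Sequential(
#             torch.nn.Dropout(p=0.3, inplace=True),
#             torch.nn.Linear(in_features=1408, out_features=num_classes),
#         )
#         return model, transforms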
# Load saved weights
effnetb2.load_state_dict(
    torch.load(
        f="09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth",
        map_location=torch.device("cpu")  # load to CPU
    )
)
# 3. Predict function
# Create predict function
def predict(img) -> Tuple[Dict, float]:
    """
    Transforms and performs a prediction on img and returns predictions and time per prediction.
    """
    # Start the timer
    start_time = timer()

    # Transform the target image and add a batch dimension
    img = effnetb2_transforms(img).unsqueeze(0)

    # Put the model into evaluation mode and turn on inference mode
    effnetb2.eval()
    with torch.inference_mode():
        # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
        pred_probs = torch.softmax(effnetb2(img), dim=1)

    # Create a prediction label and prediction probability dictionary for each prediction class (required format for Gradio)
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    # Calculate prediction time
    pred_time = round(timer() - start_time, 5)

    # Return the prediction dictionary and prediction time
    return pred_labels_and_probs, pred_time
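# Optional local sanity check (an assumed snippet, not part of the deployed app): run the
# predict function on one of the bundled example images before wiring up the Gradio interface.
#
#     from PIL import Image
#     sample = Image.open("examples/" + os.listdir("examples")[0])
#     preds, secs = predict(sample)
#     print(sorted(preds.items(), key=lambda kv: kv[1], reverse=True)[:5], secs)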
# 4. Gradio app
# Create title, description and article strings
title = "FoodVision Big"
description = "An EfficientNetB2 feature extractor computer vision model to classify images of food into [101 different classes](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/extras/food101_class_names.txt)."
article = "Created as part of the [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/) course."
# Create examples list from "examples/" directory
example_list = [["examples/" + example] for example in os.listdir("examples")]
# Create Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=5, label="Predictions"),
        gr.Number(label="Prediction time (s)")
    ],
    examples=example_list,
    title=title,
    description=description,
    article=article
)
# Launch the app
demo.launch()
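# Note: on Hugging Face Spaces the app is served automatically once launched. For local
# debugging, launch() also accepts standard Gradio options such as debug=True or
# share=True (creates a temporary public link); these are optional and not used above.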