### 1. Imports and class names setup. ###
import gradio as gr
import os
import torch
from model import create_effnet_b2_model
from timeit import default_timer as timer
from typing import Tuple, Dict
# Setup class names
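# class_names.txt is assumed to contain one Food101 class name per line (e.g. "apple_pie").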
with open("class_names.txt", "r") as f:
    class_names = [food.strip() for food in f.readlines()]
### 2. Model and transforms preparation ###
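# create_effnet_b2_model (defined in model.py) is assumed to return the EffNetB2 model
# together with its matching torchvision transforms.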
effnetb2, effnetb2_transforms = create_effnet_b2_model(num_classes=101)
# Load saved weights
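# NOTE: the .pth weights file is loaded via a relative path, so it is assumed to sit
# in the same directory the app is launched from (the root of this Space).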
effnetb2.load_state_dict(torch.load(
    f="09_pretrained_effnetb2_feature_extractor_food101_20_percent.pth",
    map_location=torch.device("cpu")  # Load the model to the CPU
))
### 3. Predict function. ###
def predict(img) -> Tuple[Dict, float]:
    """Transforms the input image, runs it through EffNetB2 and returns a dictionary
    of prediction probabilities per class along with the prediction time."""
    # Start a timer
    start_time = timer()

    # Transform the input image for use with EffNetB2
    img = effnetb2_transforms(img).unsqueeze(dim=0)  # add a batch dimension

    # Put the model into eval mode and make a prediction
    effnetb2.eval()
    with torch.inference_mode():
        # Pass the transformed image through the model and turn the prediction logits into probabilities
        pred_probs = torch.softmax(effnetb2(img), dim=1)

    # Create a prediction label to prediction probability dictionary
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0, i]) for i in range(len(class_names))}

    # Calculate prediction time
    pred_time = round(timer() - start_time, 4)

    # Return the prediction dictionary and prediction time
    return pred_labels_and_probs, pred_time
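# Optional local sanity check (hypothetical, not executed by the Space): open one of the
# bundled example images and call predict() directly to verify the pipeline end to end.
# from PIL import Image
# sample_img = Image.open("examples/" + os.listdir("examples")[0])
# top_probs, seconds = predict(sample_img)
# print(max(top_probs, key=top_probs.get), seconds)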
### 4. Gradio app ###
# Create example list
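# gr.Interface expects a list of example rows, where each row is itself a list of inputs;
# with a single image input, each row is a one-element list containing the image path.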
example_list = [["examples/" + example] for example in os.listdir("examples")]
# Create title, description and article
title = "FoodVision Big - 🥟"
description = "An [EfficientNetB2 feature extractor](https://pytorch.org/vision/stable/models/generated/torchvision.models.efficientnet_b2.html#torchvision.models.efficientnet_b2) computer vision model to classify images [101 classes of food from the Food101 dataset](https://github.com/mrdbourke/pytorch-deep-learning/blob/main/extras/food101_class_names.txt)."
article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/#11-turning-our-foodvision-big-model-into-a-deployable-app)."
demo = gr.Interface(
    fn=predict,  # the function mapping inputs to outputs
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(num_top_classes=5, label="Predictions"),
             gr.Number(label="Prediction time (s)")],
    examples=example_list,
    title=title,
    description=description,
    article=article)
# Launch the demo
demo.launch()
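# If running locally, demo.launch(debug=True) surfaces errors in the terminal and
# demo.launch(share=True) creates a temporary public link (both are standard Gradio options).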