# foodvision_mini/app.py
### 1. Imports and class names setup ###
import torch
import torchvision
import gradio as gr
import os
from torch import nn
from model import create_effnetb2_model
from typing import Tuple, Dict
from timeit import default_timer as timer
# Setup class names
class_names = ['pizza', 'steak', 'sushi']
### 2. Model and transforms preparation ###
effnetb2, effnetb2_transforms = create_effnetb2_model()
# Load saved weights
effnetb2.load_state_dict(
    torch.load(
        f="10_pretrained_effnetb2_20_percent.pth",
        map_location=torch.device("cpu")  # ensure the weights load on the CPU
    )
)
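# For reference, a rough sketch of what model.create_effnetb2_model() likely does
# (an assumption based on the torchvision EffNetB2 API; the real definition lives in model.py):
#
#   def create_effnetb2_model(num_classes: int = 3, seed: int = 42):
#       weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
#       transforms = weights.transforms()
#       model = torchvision.models.efficientnet_b2(weights=weights)
#       for param in model.parameters():
#           param.requires_grad = False  # freeze the feature extractor layers
#       torch.manual_seed(seed)
#       model.classifier = nn.Sequential(
#           nn.Dropout(p=0.3, inplace=True),
#           nn.Linear(in_features=1408, out_features=num_classes),  # 1408 = EffNetB2 feature dim
#       )
#       return model, transforms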
### 3. Predict function ###
def predict(img) -> Tuple[Dict, float]:
    """Transforms an input image, predicts with EffNetB2 and returns the
    class probabilities along with the prediction time."""
    # Start a timer
    start_time = timer()

    # Transform the input image for use with EffNetB2
    transformed_image = effnetb2_transforms(img).unsqueeze(0)  # add a batch dimension

    # Put model into eval mode and make a prediction
    effnetb2.eval()
    with torch.inference_mode():
        pred_prob = torch.softmax(effnetb2(transformed_image), dim=1)

    # Create a {prediction label: prediction probability} dict
    pred_labels_and_probs = {class_names[i]: float(pred_prob[0][i]) for i in range(len(class_names))}

    # Calculate prediction time
    end_time = timer()
    pred_time = round(end_time - start_time, 4)

    return pred_labels_and_probs, pred_time
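# Quick local sanity check (a sketch, assuming at least one image exists in examples/):
#
#   from PIL import Image
#   sample_path = "examples/" + os.listdir("examples")[0]
#   labels_and_probs, seconds = predict(Image.open(sample_path))
#   print(labels_and_probs, seconds)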
### 4. Gradio app ###
# Create title, description and article
title = "FoodVision Mini 🥩🍕🍣"
description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
article = "Created in 10. PyTorch Model Deployment."
# Create example list
example_list = [["examples/" + example] for example in os.listdir("examples")]
# Create the Gradio Demo
demo = gr.Interface(fn=predict,  # maps inputs to outputs
                    inputs=gr.Image(type="pil"),
                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),
                             gr.Number(label="Prediction Time (s)")],
                    examples=example_list,
                    title=title,
                    description=description,
                    article=article)
demo.launch(debug=False)
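# Note: when running locally, Gradio can also create a temporary public link via
# demo.launch(debug=False, share=True); on Hugging Face Spaces the plain launch above is enough.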