import os
from timeit import default_timer as timer
from typing import Tuple, Dict

import gradio as gr
import torch

from model import create_effnetb2_model

# Class names the model was trained on
class_names = ["pizza", "steak", "sushi"]

# Create the EffNetB2 model and its matching image transforms
effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=3)

# Load the saved feature extractor weights (mapped to CPU for deployment)
effnetb2.load_state_dict(
    torch.load(f="09_pretrained_effnetb2_feature_extractor__pizza_steak_sushi_20_percent.pth",
               map_location=torch.device("cpu"))
)

def predict(img) -> Tuple[Dict, float]:
    """Transforms and predicts on img, returning class probabilities and prediction time."""
    start_time = timer()

    # Transform the image and add a batch dimension
    img = effnetb2_transforms(img).unsqueeze(0)

    # Put the model in eval mode and make a prediction
    effnetb2.eval()
    with torch.inference_mode():
        pred_probs = torch.softmax(effnetb2(img), dim=1)

    # Map each class name to its predicted probability
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    pred_time = round(timer() - start_time, 5)
    return pred_labels_and_probs, pred_time
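
# A minimal local sanity check of predict() (hypothetical: assumes an image such as
# "examples/pizza.jpg" exists next to this script; the actual file names may differ):
#
#   from PIL import Image
#   labels_and_probs, seconds = predict(Image.open("examples/pizza.jpg"))
#   print(labels_and_probs, seconds)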

# Gradio interface metadata
title = "FoodVision Mini"
description = "An EfficientNetB2 feature extractor computer vision model to classify images of pizza, steak and sushi."
article = "Created at [09. PyTorch Model Deployment](https://www.learnpytorch.io/09_pytorch_model_deployment/)."

# Example images pulled from the examples/ directory
example_list = [["examples/" + example] for example in os.listdir("examples")]

# Create and launch the Gradio demo
demo = gr.Interface(fn=predict, inputs=gr.Image(type="pil"),
                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),
                             gr.Number(label="Prediction time (s)")],
                    examples=example_list, title=title, description=description, article=article)
demo.launch()