### 1. Imports and class names setup ###
import gradio as gr
import os
import torch
from model import create_vit_b_16_swag
from timeit import default_timer as timer
from typing import Tuple, Dict
# Setup class names (the 101 Food101 classes)
class_names = ['apple_pie', 'baby_back_ribs', 'baklava', 'beef_carpaccio', 'beef_tartare',
               'beet_salad', 'beignets', 'bibimbap', 'bread_pudding', 'breakfast_burrito',
               'bruschetta', 'caesar_salad', 'cannoli', 'caprese_salad', 'carrot_cake',
               'ceviche', 'cheese_plate', 'cheesecake', 'chicken_curry', 'chicken_quesadilla',
               'chicken_wings', 'chocolate_cake', 'chocolate_mousse', 'churros', 'clam_chowder',
               'club_sandwich', 'crab_cakes', 'creme_brulee', 'croque_madame', 'cup_cakes',
               'deviled_eggs', 'donuts', 'dumplings', 'edamame', 'eggs_benedict',
               'escargots', 'falafel', 'filet_mignon', 'fish_and_chips', 'foie_gras',
               'french_fries', 'french_onion_soup', 'french_toast', 'fried_calamari', 'fried_rice',
               'frozen_yogurt', 'garlic_bread', 'gnocchi', 'greek_salad', 'grilled_cheese_sandwich',
               'grilled_salmon', 'guacamole', 'gyoza', 'hamburger', 'hot_and_sour_soup',
               'hot_dog', 'huevos_rancheros', 'hummus', 'ice_cream', 'lasagna',
               'lobster_bisque', 'lobster_roll_sandwich', 'macaroni_and_cheese', 'macarons', 'miso_soup',
               'mussels', 'nachos', 'omelette', 'onion_rings', 'oysters',
               'pad_thai', 'paella', 'pancakes', 'panna_cotta', 'peking_duck',
               'pho', 'pizza', 'pork_chop', 'poutine', 'prime_rib',
               'pulled_pork_sandwich', 'ramen', 'ravioli', 'red_velvet_cake', 'risotto',
               'samosa', 'sashimi', 'scallops', 'seaweed_salad', 'shrimp_and_grits',
               'spaghetti_bolognese', 'spaghetti_carbonara', 'spring_rolls', 'steak', 'strawberry_shortcake',
               'sushi', 'tacos', 'takoyaki', 'tiramisu', 'tuna_tartare',
               'waffles']
### 2. Model and transforms preparation ###
# Create ViT-B/16 (SWAG) model and its transforms
vit_b_16_swag, vit_b_16_swag_transforms = create_vit_b_16_swag()
# Load saved weights
vit_b_16_swag.load_state_dict(
    torch.load(
        f="vit_b_16_swag_20percent_10epoch.pth",
        map_location=torch.device("cpu"),  # load to CPU
    )
)
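
# model.py is not included in this file, so the exact contents of create_vit_b_16_swag()
# are an assumption. A plausible sketch using torchvision's SWAG weights (hypothetical
# argument names; the real helper may differ) would be:
#
#   import torch
#   import torchvision
#   from torch import nn
#
#   def create_vit_b_16_swag(num_classes: int = 101, seed: int = 42):
#       """Creates a ViT-B/16 feature extractor (SWAG weights) and its matching transforms."""
#       weights = torchvision.models.ViT_B_16_Weights.IMAGENET1K_SWAG_E2E_V1
#       transforms = weights.transforms()                 # resize/crop/normalize expected by the weights
#       model = torchvision.models.vit_b_16(weights=weights)
#       for param in model.parameters():                  # freeze the backbone
#           param.requires_grad = False
#       torch.manual_seed(seed)
#       model.heads = nn.Sequential(                      # replace the classifier head
#           nn.Linear(in_features=768, out_features=num_classes))
#       return model, transforms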
### 3. Predict function ###
# Create predict function
def predict(img) -> Tuple[Dict, float]:
    """Transforms and performs a prediction on img and returns the prediction and time taken."""
    # Start the timer
    start_time = timer()

    # Transform the target image and add a batch dimension
    img = vit_b_16_swag_transforms(img).unsqueeze(0)

    # Put model into evaluation mode and turn on inference mode
    vit_b_16_swag.eval()
    with torch.inference_mode():
        # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
        pred_probs = torch.softmax(vit_b_16_swag(img), dim=1)

    # Create a prediction label and prediction probability dictionary for each prediction class
    # (this is the required format for Gradio's output parameter)
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    # Calculate the prediction time
    pred_time = round(timer() - start_time, 5)

    # Return the prediction dictionary and prediction time
    return pred_labels_and_probs, pred_time
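
# Example usage (hypothetical image path), illustrating the output format Gradio receives:
#   from PIL import Image
#   pred_dict, pred_secs = predict(Image.open("examples/some_food.jpg"))
#   # pred_dict -> {'apple_pie': 0.01, ..., 'steak': 0.91, ...}; pred_secs -> 0.43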
### 4. Gradio app ###
# Create title, description and article strings
title = "Food Classifier V1"
description = " 20 Percent Food 101 on Vit_b_16 SWAG"
article = "Created at google collab. Documentation at https://medium.com/me/stories/public, Code repository at https://github.com/Alyxx-The-Sniper/CNN "
# Create examples list from "examples/" directory
example_list = [["examples/" + example] for example in os.listdir("examples")]
# Create the Gradio demo
demo = gr.Interface(fn=predict,  # mapping function from input to output
                    inputs=gr.Image(type="pil"),  # what are the inputs?
                    outputs=[gr.Label(num_top_classes=4, label="Predictions"),  # what are the outputs?
                             gr.Number(label="Prediction time (s)")],
                    # predict returns two values, so the demo needs two output components
                    examples=example_list,  # example images gathered from the "examples/" directory
                    title=title,
                    description=description,
                    article=article)
# Launch the demo!
demo.launch()