import os
import gradio as gr
import torch
import torchvision
from torch import nn
from torchvision import models
from timeit import default_timer as timer
from typing import Tuple, Dict
# Load the class names (one label per line)
with open("class_names.txt", "r") as f:
    class_names = [car.strip() for car in f.readlines()]
# Model and transforms preparation
effnetb0_weights = models.EfficientNet_B0_Weights.DEFAULT
effnetb0 = torchvision.models.efficientnet_b0(weights=effnetb0_weights)
effnetb0_transforms = effnetb0_weights.transforms()
# Freeze the base model parameters
for param in effnetb0.parameters():
    param.requires_grad = False
# Replace the classifier head to output the 196 Stanford Cars classes
effnetb0.classifier = nn.Sequential(
    nn.Dropout(p=0.2),
    nn.Linear(in_features=1280,
              out_features=196)
)
# Load the saved fine-tuned weights
effnetb0.load_state_dict(
    torch.load("pretrained_effnetb0_stanford_cars_20_percent.pth",
               map_location=torch.device("cpu"))
)
# Predict function
def predict(img) -> Tuple[Dict, float]:
    """Transforms the input image, runs it through EffNetB0 and returns the
    class probabilities along with the prediction time."""
    start_time = timer()

    # Apply the EffNetB0 transforms and add a batch dimension
    img = effnetb0_transforms(img).unsqueeze(0)

    # Put the model into eval mode and make the prediction
    effnetb0.eval()
    with torch.inference_mode():
        pred_logits = effnetb0(img)
        pred_probs = torch.softmax(pred_logits, dim=1)

    # Create a {class name: probability} dict for every class
    # (the format Gradio's Label output expects)
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    # Calculate the prediction time
    end_time = timer()
    pred_time = round(end_time - start_time, 5)

    return pred_labels_and_probs, pred_time
# Gradio app
title = "EffNetB0"
description = "A pretrained EffNetB0 model fine-tuned on the Stanford Cars dataset."
# Create the example list from the images in the examples/ directory
example_list = [["examples/" + example] for example in os.listdir("examples")]
# Create the Gradio interface
demo = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Label(num_top_classes=5, label="Predictions"),
        gr.Number(label="Prediction time (s)"),
    ],
    examples=example_list,
    title=title,
    description=description,
)
# Launch the app!
demo.launch()