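"""FoodVision: a Gradio demo app.

Loads an EfficientNetB2 feature extractor trained on 101 food classes and
serves it behind a Gradio image-classification interface.
"""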
import os
from time import time
from typing import Dict, Tuple

import gradio as gr
import torch

from model import create_effnetb2

# Create an EfficientNetB2 feature extractor (101 output classes) and its matching transforms
effnetb2, effnetb2_transforms = create_effnetb2(101)

# Load the saved weights onto the CPU (demo hosts are typically CPU-only)
effnetb2.load_state_dict(torch.load(f='effnetB2_101.pth', map_location=torch.device('cpu')))
effnetb2.eval()  # set inference behaviour for dropout/batch norm layers once, up front

# Load the class names, one per line
with open('class_names.txt', 'r') as f:
    class_names = [food.strip() for food in f.readlines()]
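
# Optional sanity check (assumes class_names.txt matches the 101-way model head)
assert len(class_names) == 101, f'Expected 101 class names, got {len(class_names)}'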

def predict(image) -> Tuple[Dict, float]:
    """Transforms an input image, predicts on it with EffNetB2 and returns the
    per-class probabilities along with the time taken."""
    start = time()
    # Transform the image and add a batch dimension: (C, H, W) -> (1, C, H, W)
    transformed_image = effnetb2_transforms(image).unsqueeze(0)
    with torch.inference_mode():
        y_logits = effnetb2(transformed_image)
        probs = torch.softmax(y_logits, dim=1).squeeze()
    # Map each class name to its predicted probability (the format gr.Label expects)
    pred_labels_and_probs = {class_names[i]: probs[i].item() for i in range(len(class_names))}
    end = time()
    return pred_labels_and_probs, round(end - start, 5)
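
# Quick local check (hypothetical filename; assumes at least one image exists in examples/):
#   from PIL import Image
#   preds, seconds = predict(Image.open('examples/pizza.jpg'))
#   print(max(preds, key=preds.get), seconds)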

# Build the example list in the format Gradio expects: a list of [filepath] lists
example_list = [['examples/' + example] for example in os.listdir('examples')]
# Create title and description strings for the demo page
title = "FoodVision"
description = "An EfficientNetB2 feature extractor computer vision model that classifies images of food into 101 classes."

# Create the Gradio demo
demo = gr.Interface(fn=predict, # mapping function from input to output
                    inputs=gr.Image(type="pil"), # what are the inputs?
                    outputs=[gr.Label(num_top_classes=5, label="Predictions"), # what are the outputs?
                             gr.Number(label="Prediction time (s)")], # predict() returns two values, so we declare two output components
                    examples=example_list,
                    title=title,
                    description=description)

# Launch the demo!
demo.launch(debug=False)
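
# Note: passing share=True to launch() (a standard gr.Interface option) would also
# generate a temporary public URL when running locally; it isn't needed on hosted
# platforms such as Hugging Face Spaces.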