Commit 79cd580
1 Parent(s): 4f5a2a2

Upload 7 files

initial command
- app.py +22 -0
- efficient_b2.pth +3 -0
- examples/3479599.jpg +0 -0
- examples/476421.jpg +0 -0
- examples/911808.jpg +0 -0
- model.py +9 -0
- predict.py +20 -0
app.py
ADDED
@@ -0,0 +1,22 @@
try:
    import gradio as gr
except ImportError:
    # Gradio is not installed yet: install it, then retry the import
    import subprocess, sys
    subprocess.check_call([sys.executable, "-m", "pip", "install", "gradio"])
    import gradio as gr

from predict import predict_gradio

title = "Mohammad Ali Food Classification🍔"
description = "This demo is just for my training. Thank you Daniel Bourke for your courses."

# Two outputs: confidences for every class, and the single top prediction
demo = gr.Interface(
    predict_gradio,
    inputs=gr.Image(type="pil"),
    outputs=[gr.Label(num_top_classes=3, label="All Predictions in All Classes"),
             gr.Label(num_top_classes=1, label="Model Predicts Image as a")],
    examples=["/demo/examples/476421.jpg", "/demo/examples/3479599.jpg", "/demo/examples/911808.jpg"],
    title=title,
    description=description,
)

demo.launch()
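Note: because outputs lists two gr.Label components, predict_gradio has to return a pair — a dict mapping each class name to a confidence, and a single class-name string. A minimal stub illustrating that contract (illustrative values only, not part of the commit):

def fake_predict(image):
    # Same return shape as predict_gradio: (confidences dict, top class name)
    return {"pizza": 0.91, "steak": 0.05, "sushi": 0.04}, "pizza"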
efficient_b2.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1e5872189fc92a388af899d85b3ccb27560cf1fa19330d3f363ee4f2a9ef8cc3
size 31345215
examples/3479599.jpg
ADDED
examples/476421.jpg
ADDED
examples/911808.jpg
ADDED
model.py
ADDED
@@ -0,0 +1,9 @@
import torch
import torchvision
from torchvision.models import EfficientNet_B2_Weights, efficientnet_b2

# The default EfficientNet-B2 weights carry the matching preprocessing transforms
efficient_weight = EfficientNet_B2_Weights.DEFAULT
efficient_transformer = efficient_weight.transforms()

# Load the fine-tuned model on the CPU (map_location avoids errors if the
# checkpoint was saved from a GPU)
efficient_model = torch.load("/demo/efficient_b2.pth", map_location="cpu")
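Note: torch.load above unpickles a whole model object, which needs the original class definitions to be importable at load time. If the checkpoint had instead been saved as a state_dict (which is not how efficient_b2.pth in this commit was produced), a hedged sketch of the load would look like this, assuming a 3-class classification head:

import torch
from torchvision.models import efficientnet_b2

# Hypothetical alternative: rebuild the architecture, swap in a 3-class head,
# then load a state_dict checkpoint
model = efficientnet_b2(weights=None)
model.classifier[1] = torch.nn.Linear(in_features=1408, out_features=3)
model.load_state_dict(torch.load("efficient_b2.pth", map_location="cpu"))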
predict.py
ADDED
@@ -0,0 +1,20 @@
import torch
import torchvision
from model import efficient_transformer, efficient_model

CLASS_NAMES = ['pizza', 'steak', 'sushi']

def predict_gradio(image):

    # Apply the EfficientNet-B2 preprocessing transforms to the incoming PIL image
    image = efficient_transformer(image)

    efficient_model.eval()

    with torch.no_grad():
        # Add a batch dimension before the forward pass
        pred = efficient_model(torch.unsqueeze(image, dim=0))

    # Softmax converts the logits into probabilities over the three classes
    probs = torch.softmax(pred, dim=1)[0]
    prediction_per_labels = {CLASS_NAMES[i]: float(probs[i]) for i in range(len(CLASS_NAMES))}

    # Single most likely class name
    prediction = CLASS_NAMES[torch.argmax(pred).item()]

    return prediction_per_labels, prediction
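A quick sanity check outside Gradio is to call predict_gradio directly on one of the committed example images (path taken relative to the repo root; the printed values are illustrative, not actual model output):

from PIL import Image
from predict import predict_gradio

img = Image.open("examples/476421.jpg")  # one of the committed example images
probs, top_class = predict_gradio(img)
print(probs)      # e.g. {'pizza': 0.91, 'steak': 0.05, 'sushi': 0.04}
print(top_class)  # e.g. 'pizza'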