Commit 233e156 · Parent(s): fed3efb
RickOWO12344 committed: Upload 7 files
Files changed:
- .gitattributes +1 -0
- app.py +79 -0
- examples/1.jpg +0 -0
- examples/4.JPG +0 -0
- examples/9.jpg +3 -0
- model.py +26 -0
- requirements.txt +3 -0
- transform.py +12 -0
.gitattributes
CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+examples/9.jpg filter=lfs diff=lfs merge=lfs -text
app.py
ADDED
@@ -0,0 +1,79 @@
import gradio as gr
import os
import torch

from model import ResNet18_model
from transform import transforms_img

from timeit import default_timer as timer
from typing import Tuple, Dict

# Setup class names
class_names = ["Anthracnose", "Chimaera", "Healthy Leaves"]

### 2. Model and transforms preparation ###

# Create model
result_0 = ResNet18_model(num_classes=len(class_names))

Transform = transforms_img

# Load saved weights
result_0.load_state_dict(torch.load(f="models/Palm_Leaves_ResNet18.pth",
                                    map_location=torch.device("cpu"),  # load to CPU
                                    )
                         )

### 3. Predict function ###

def predict(img) -> Tuple[Dict, float]:
    """Transforms and performs a prediction on img; returns the prediction and time taken."""
    # Start the timer
    start_time = timer()

    # Transform the target image and add a batch dimension
    img = Transform(img).unsqueeze(0)

    # Put model into evaluation mode and turn on inference mode
    result_0.eval()
    with torch.inference_mode():
        # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
        pred_probs = torch.softmax(result_0(img), dim=1)

    # Create a prediction label / prediction probability dictionary for each class
    # (this is the required format for Gradio's Label output)
    pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}

    # Calculate the prediction time
    pred_time = round(timer() - start_time, 5)

    # Return the prediction dictionary and prediction time
    return pred_labels_and_probs, pred_time

### 4. Gradio app ###

# Create title, description and article strings
title = "Palm Leaves Detection AI"
description = "A deep learning model that classifies the condition of palm leaves."
article = "Created by LK."

# Create examples list from the "examples/" directory
example_list = [["examples/" + example] for example in os.listdir("examples")]

# Create the Gradio demo
demo = gr.Interface(fn=predict,  # mapping function from input to output
                    inputs=gr.Image(type="pil"),  # what are the inputs?
                    outputs=[gr.Label(num_top_classes=3, label="Predictions"),
                             gr.Number(label="Prediction time (s)")],  # fn returns two values, so two output components
                    examples=example_list,
                    title=title,
                    description=description,
                    article=article,
                    allow_flagging="never")

# Launch the demo!
demo.launch()
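As a quick sanity check, predict() can also be exercised outside the Gradio UI. A minimal sketch, assuming the repo's example images and the weights file are available locally (this snippet is not part of the commit):

# Hypothetical local smoke test for predict() (not part of this commit).
from PIL import Image

label_probs, seconds = predict(Image.open("examples/1.jpg"))
print(label_probs)  # dict mapping each class name to its predicted probability
print(seconds)      # prediction time in seconds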
examples/1.jpg
ADDED
examples/4.JPG
ADDED
examples/9.jpg
ADDED
(stored with Git LFS)
model.py
ADDED
@@ -0,0 +1,26 @@
import torch
import torchvision.models as models

def ResNet18_model(num_classes: int = 3):

    # Create a ResNet18 model with ImageNet-pretrained weights
    # (weights=... is the current torchvision API, equivalent to the deprecated pretrained=True)
    model_0 = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)

    # Number of input features to the final fully connected layer
    num_ftrs = model_0.fc.in_features

    # Replace the last linear layer with a new one that has one output unit per class.
    # The trailing Dropout is a no-op at inference time (app.py calls .eval()), and the
    # Sequential structure must stay as-is so the saved state_dict keys match on load.
    model_0.fc = torch.nn.Sequential(
        torch.nn.Linear(num_ftrs, num_classes),
        torch.nn.Dropout(p=0.2)
    )

    return model_0
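For context, app.py expects models/Palm_Leaves_ResNet18.pth to exist, but the checkpoint is not produced in this commit. A hedged sketch of how such a checkpoint would typically be saved after fine-tuning (the training loop is assumed, not shown in the source):

# Hypothetical: save the fine-tuned state dict so app.py can reload it on CPU.
import os
import torch
from model import ResNet18_model

model = ResNet18_model(num_classes=3)
# ... fine-tune `model` on the palm-leaves dataset here (not part of this commit) ...
os.makedirs("models", exist_ok=True)
torch.save(model.state_dict(), "models/Palm_Leaves_ResNet18.pth")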
requirements.txt
ADDED
@@ -0,0 +1,3 @@
torch
torchvision
gradio
transform.py
ADDED
@@ -0,0 +1,12 @@
from torchvision import transforms

transforms_img = transforms.Compose([
    transforms.Resize((224, 224)),  # 1. Reshape all images to 224x224 (though some models may require different sizes)
    transforms.ToTensor(),          # 2. Turn image values into tensors scaled between 0 and 1
    transforms.Normalize(mean=[0.1909, 0.1937, 0.1896],  # 3. Normalize with per-channel dataset statistics
                         std=[0.3242, 0.3258, 0.3336])
])
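To see what the pipeline produces, transforms_img can be applied to a single image. A minimal sketch, assuming one of the repo's example images is on hand:

# Hypothetical check: transforms_img maps a PIL image to a normalized 3x224x224 tensor.
from PIL import Image
from transform import transforms_img

img = Image.open("examples/1.jpg").convert("RGB")
tensor = transforms_img(img)
print(tensor.shape)  # torch.Size([3, 224, 224])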