Update app.py
app.py CHANGED
@@ -1,4 +1,10 @@
 import gradio as gr
+
+from PIL import Image
+
+import spaces
+
+from typing import Dict
 import torch
 from transformers import ViTImageProcessor, AutoFeatureExtractor, AutoModelForImageClassification
 
@@ -9,20 +15,31 @@ model = AutoModelForImageClassification.from_pretrained("saved_model_files")
 
 labels = ['angular_leaf_spot', 'bean_rust', 'healthy']
 
+@spaces.GPU(duration=240)
+def classify(image: Image.Image) -> Dict[str, float]:
+    """
+    Classify an image of a bean plant leaf into one of several health categories.
 
-
-
-    logits = model(features["pixel_values"])[-1]
-    probability = torch.nn.functional.softmax(logits, dim=-1)
-    probs = probability[0].detach().numpy()
-    confidences = {label: float(probs[i]) for i, label in enumerate(labels)}
-    return confidences
+    Args:
+        image (Image.Image): The input image of the bean leaf to be classified.
 
-
+    Returns:
+        Dict[str, float]: A dictionary where the keys are the health labels
+                          (e.g., 'angular_leaf_spot', 'bean_rust', 'healthy') and
+                          the values are the confidence scores for each label.
+    """
+    features = image_processor(image, return_tensors='pt')
+    logits = model(features["pixel_values"])[-1]
+    probability = torch.nn.functional.softmax(logits, dim=-1)
+    probs = probability[0].detach().numpy()
+    confidences = {label: float(probs[i]) for i, label in enumerate(labels)}
+    return confidences
+
+####### GRADIO APP #######
 title = """<h1 id="title">Bean plant health predictor through images of leaves using ViT image classifier</h1>"""
 
 description = """
-
+Problem Statement: A farming company is having issues with diseases affecting their bean plants. The farmers have to constantly monitor the leaves of the plants so that they can immediately treat the leaves if they show any signs of disease.
 We are asked to build a machine learning-based app they can deploy on a drone to quickly identify diseased plants.
 
 
@@ -43,8 +60,9 @@ demo = gr.Blocks(css=css, theme=theme)
 with demo:
     gr.Markdown(title)
     gr.Markdown(description)
+    interface = gr.Interface(fn=classify,
+                             inputs="image",
+                             outputs="label",
+                             examples="images")
 
-
-    interface = gr.Interface(fn=classify, inputs="image", outputs="label")
-
-    demo.launch()
+demo.launch()
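For anyone reproducing the change locally, below is a minimal sketch of the unchanged app.py context that the new classify() function depends on. Only two of these lines are confirmed by the hunk headers (the model = AutoModelForImageClassification.from_pretrained("saved_model_files") and demo = gr.Blocks(css=css, theme=theme) lines); the image_processor definition and the example image path are assumptions made for illustration.

import torch
from PIL import Image
from transformers import ViTImageProcessor, AutoModelForImageClassification

# Confirmed by the second hunk header: the fine-tuned classifier is loaded
# from the Space's local "saved_model_files" directory.
model = AutoModelForImageClassification.from_pretrained("saved_model_files")

# Assumption: classify() calls image_processor(...), so the unchanged code
# presumably builds a processor from the same checkpoint.
image_processor = ViTImageProcessor.from_pretrained("saved_model_files")

labels = ['angular_leaf_spot', 'bean_rust', 'healthy']

# Quick smoke test of the new classify() logic outside Gradio.
# "images/example_leaf.jpg" is a hypothetical path; the examples="images"
# argument in the diff suggests the Space ships an images/ folder.
leaf = Image.open("images/example_leaf.jpg").convert("RGB")
features = image_processor(leaf, return_tensors="pt")
logits = model(features["pixel_values"])[-1]
probs = torch.nn.functional.softmax(logits, dim=-1)[0].detach().numpy()
print({label: float(probs[i]) for i, label in enumerate(labels)})

On the Gradio side, @spaces.GPU(duration=240) requests ZeroGPU hardware for up to 240 seconds per call when the Space runs on Hugging Face ZeroGPU, and examples="images" points gr.Interface at a directory of sample leaf images presumably bundled with the repository.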