razfar committed
Commit 552fc99
1 Parent(s): 6c58c8e

Add application files

.gitignore ADDED
@@ -0,0 +1 @@
+ .DS_Store
README.md CHANGED
@@ -2,7 +2,7 @@
  title: Bean Leaf Classification
  emoji: ⚡
  colorFrom: green
- colorTo: gray
+ colorTo: blue
  sdk: gradio
  sdk_version: 3.3.1
  app_file: app.py
app.py ADDED
@@ -0,0 +1,38 @@
+ import torch
+ import datasets
+ from transformers import AutoFeatureExtractor, AutoModelForImageClassification
+ import gradio as gr
+
+ # Load the beans dataset (used only to recover the class label names).
+ dataset = datasets.load_dataset('beans', 'full_size')
+
+ # Load the fine-tuned ViT feature extractor and classifier from local files.
+ extractor = AutoFeatureExtractor.from_pretrained("saved_model_files")
+ model = AutoModelForImageClassification.from_pretrained("saved_model_files")
+
+ labels = dataset['train'].features['labels'].names
+
+ def classify(im):
+     # Preprocess the input image into a pixel-value tensor.
+     features = extractor(im, return_tensors='pt')
+     logits = model(features["pixel_values"]).logits
+     # Convert logits into per-class probabilities.
+     probability = torch.nn.functional.softmax(logits, dim=-1)
+     probs = probability[0].detach().numpy()
+     confidences = {label: float(probs[i]) for i, label in enumerate(labels)}
+     return confidences
+
+ interface = gr.Interface(
+     fn=classify,
+     inputs="image",
+     outputs="label",
+     examples=[
+         'https://datasets-server.huggingface.co/assets/beans/--/default/train/5/image/image.jpg',
+         'https://datasets-server.huggingface.co/assets/beans/--/default/train/10/image/image.jpg',
+         'https://datasets-server.huggingface.co/assets/beans/--/default/train/15/image/image.jpg'
+     ],
+     title="Bean Leaf Classification",
+     description="Input an image of a bean leaf to predict whether it is healthy or diseased.",
+ )
+
+ interface.launch()
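As a sanity check, the same inference path can be exercised outside the Gradio UI. A minimal sketch, assuming the saved_model_files directory from this commit is available locally; "leaf.jpg" is a hypothetical placeholder path, not a file in this repo:

```python
# Smoke test: run one image through the same pipeline app.py uses.
import torch
from PIL import Image
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

extractor = AutoFeatureExtractor.from_pretrained("saved_model_files")
model = AutoModelForImageClassification.from_pretrained("saved_model_files")

im = Image.open("leaf.jpg").convert("RGB")  # placeholder input image
with torch.no_grad():
    logits = model(**extractor(im, return_tensors="pt")).logits
probs = logits.softmax(dim=-1)[0]
for idx, p in enumerate(probs.tolist()):
    print(model.config.id2label[idx], round(p, 4))
```

Note that model.config.id2label (from config.json below) carries the same three class names, so a variant of app.py could skip downloading the full beans dataset just to read the labels.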
requirements.txt ADDED
@@ -0,0 +1,4 @@
+ torch
+ gradio
+ datasets
+ transformers
saved_model_files/config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "angular_leaf_spot",
+     "1": "bean_rust",
+     "2": "healthy"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "angular_leaf_spot": "0",
+     "bean_rust": "1",
+     "healthy": "2"
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.22.1"
+ }
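The id2label / label2id entries above are what make the checkpoint self-describing. A sketch of how they can be read back through transformers (AutoConfig parses the JSON keys of id2label into integers):

```python
# Reading the label mappings back from saved_model_files/config.json.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("saved_model_files")
print(config.id2label)  # {0: 'angular_leaf_spot', 1: 'bean_rust', 2: 'healthy'}
print(config.label2id)  # inverse mapping, class names -> indices

# Mapping a predicted class index (e.g. logits.argmax(-1).item()) to its name:
predicted_idx = 2
print(config.id2label[predicted_idx])  # 'healthy'
```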
saved_model_files/preprocessor_config.json ADDED
@@ -0,0 +1,17 @@
+ {
+   "do_normalize": true,
+   "do_resize": true,
+   "feature_extractor_type": "ViTFeatureExtractor",
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "size": 224
+ }
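In plain terms, this config resizes inputs to 224x224 with bilinear resampling ("resample": 2 is PIL's bilinear filter code) and normalizes each channel with mean and std 0.5, mapping pixel values from [0, 1] to [-1, 1]. A hand-written equivalent, as a sketch rather than the exact ViTFeatureExtractor code path:

```python
# What the preprocessor config amounts to, written out by hand.
import numpy as np
from PIL import Image

def preprocess(im: Image.Image) -> np.ndarray:
    im = im.convert("RGB").resize((224, 224), resample=Image.BILINEAR)
    x = np.asarray(im).astype(np.float32) / 255.0   # HWC layout, values in [0, 1]
    x = (x - 0.5) / 0.5                             # normalize to [-1, 1]
    return x.transpose(2, 0, 1)                     # CHW layout for PyTorch
```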
saved_model_files/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:002db3a4f0b9d1561c2ab308a241e4d55f2cc14e8e14071555b908da76adea3a
+ size 343270065
saved_model_files/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1197f31be00132694bb541398f4cad509d5b5ee25724067bcfd9673638a83e4a
+ size 3375
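The two .bin entries are Git LFS pointer files: the commit stores only the spec version, the SHA-256 of the blob, and its byte size, while the ~343 MB weights live in LFS storage. A downloaded object can be checked against the pointer's oid; a sketch, assuming the real files have been pulled (e.g. with git lfs pull):

```python
# Verify a pulled LFS object against the sha256 oid in its pointer file.
import hashlib

def lfs_sha256(path, chunk_size=1 << 20):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

digest = lfs_sha256("saved_model_files/pytorch_model.bin")
print(digest == "002db3a4f0b9d1561c2ab308a241e4d55f2cc14e8e14071555b908da76adea3a")
```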