chansung committed on
Commit
6db8928
β€’
1 Parent(s): 011ff3c

upload v1664841204 model

Browse files
Files changed (4) hide show
  1. README.md +0 -12
  2. app.py +52 -0
  3. labels.txt +3 -0
  4. requirements.txt +3 -0
README.md DELETED
@@ -1,12 +0,0 @@
1
- ---
2
- title: Vit E2e Pipeline Hf Integration
3
- emoji: πŸ’©
4
- colorFrom: blue
5
- colorTo: indigo
6
- sdk: gradio
7
- sdk_version: 3.4
8
- app_file: app.py
9
- pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
app.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import numpy as np
from PIL import Image
import tensorflow as tf
from transformers import ViTFeatureExtractor
from huggingface_hub import from_pretrained_keras

# Pretrained checkpoint used only for its preprocessing statistics
# (image_mean / image_std consumed by normalize_img below).
PRETRAIN_CHECKPOINT = "google/vit-base-patch16-224-in21k"
feature_extractor = ViTFeatureExtractor.from_pretrained(PRETRAIN_CHECKPOINT)

# NOTE(review): the "$..." tokens look like unsubstituted template
# placeholders — per the original comments, the repo id should resolve to
# something like "chansung/vit-e2e-pipeline-hf-integration" and the tag to
# "v1664841204". Confirm the templating step fills these in before deploy.
MODEL_CKPT = "$chansung/vit-e2e-pipeline-hf-integration@$v1664841204"
MODEL = from_pretrained_keras(MODEL_CKPT)

# Input resolution expected by the ViT model. (Name keeps the original
# misspelling so any external reference to RESOLTUION keeps working.)
RESOLTUION = 224

# One class name per line in labels.txt; order must match the model logits.
labels = []

with open("labels.txt", "r") as fp:
    for line in fp:
        # rstrip("\n") instead of line[:-1]: the old slice chopped the last
        # character of the final label whenever the file had no trailing
        # newline (e.g. "healthy" -> "health").
        labels.append(line.rstrip("\n"))
22
+
23
def normalize_img(
    img, mean=feature_extractor.image_mean, std=feature_extractor.image_std
):
    """Scale pixels to [0, 1], then standardize with the ViT mean/std stats."""
    scaled = img / 255
    return (scaled - tf.constant(mean)) / tf.constant(std)
30
+
31
def preprocess_input(image: Image) -> tf.Tensor:
    """Convert a PIL image into the batched dict the HF ViT model expects."""
    tensor = tf.convert_to_tensor(np.array(image))
    tensor = tf.image.resize(tensor, (RESOLTUION, RESOLTUION))
    tensor = normalize_img(tensor)
    # HF models are channel-first: (H, W, C) -> (C, H, W).
    tensor = tf.transpose(tensor, (2, 0, 1))
    return {"pixel_values": tf.expand_dims(tensor, 0)}
45
+
46
def get_predictions(image: Image) -> dict:
    """Run the model on one image and return a {label: probability} mapping.

    The return annotation is corrected from tf.Tensor to dict: the function
    builds a plain dict of per-class confidences (the shape Gradio's Label
    output consumes).
    """
    preprocessed_image = preprocess_input(image)
    prediction = MODEL.predict(preprocessed_image)
    # Softmax over the class axis turns raw logits into probabilities.
    probs = tf.nn.softmax(prediction['logits'], axis=1)

    # Use len(labels) instead of the hard-coded 3 so the app keeps working
    # if labels.txt gains or loses classes.
    confidences = {labels[i]: float(probs[0][i]) for i in range(len(labels))}
    return confidences
labels.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ angular_leaf_spot
2
+ bean_rust
3
+ healthy
requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ tensorflow
2
+ transformers
3
+ huggingface-hub