chansung committed on
Commit
514ba19
1 Parent(s): 10708ea

upload v1667110895 model

Browse files
Files changed (6) hide show
  1. .gitattributes +1 -0
  2. README.md +4 -9
  3. app.py +94 -0
  4. palette.txt +35 -0
  5. requirements.txt +4 -0
  6. test-image.jpg +3 -0
.gitattributes CHANGED
@@ -31,3 +31,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
31
  *.zip filter=lfs diff=lfs merge=lfs -text
32
  *.zst filter=lfs diff=lfs merge=lfs -text
33
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
31
  *.zip filter=lfs diff=lfs merge=lfs -text
32
  *.zst filter=lfs diff=lfs merge=lfs -text
33
  *tfevents* filter=lfs diff=lfs merge=lfs -text
34
+ test-image.jpg filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,12 +1,7 @@
1
  ---
2
- title: Segmentation Training Pipeline
3
- emoji: 🐨
4
- colorFrom: red
5
- colorTo: green
6
- sdk: gradio
7
- sdk_version: 3.8
8
  app_file: app.py
 
9
  pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: "semantic segmentation with TFX ML pipeline"
3
+ sdk: "gradio"
 
 
 
 
4
  app_file: app.py
5
+ tags: ["tfx", "auto-generated"]
6
  pinned: false
7
+ ---
 
 
app.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import gradio as gr
import numpy as np
import tensorflow as tf
from huggingface_hub import from_pretrained_keras
from PIL import Image

# Keras segmentation model pulled from the Hub at a pinned revision tag.
MODEL_CKPT = "chansung/segmentation-training-pipeline@v1667110895"
MODEL = from_pretrained_keras(MODEL_CKPT)

# Side length of the square input the model expects.
# NOTE(review): the name keeps the original "RESOLTUION" typo because the
# rest of this module references it by that name. The original file also
# assigned it twice; the duplicate assignment is removed here.
RESOLTUION = 128

# One RGB color per class label, one "[r, g, b]" list per line in palette.txt.
ADE_PALETTE = []
with open("./palette.txt", "r") as fp:
    for line in fp:
        # Strip the surrounding brackets/newline and parse the three ints.
        ADE_PALETTE.append(list(map(int, line.strip().strip("[]").split(", "))))
def preprocess_input(image: Image) -> tf.Tensor:
    """Convert a PIL image into a batched, normalized model-input tensor."""
    tensor = tf.convert_to_tensor(np.array(image))
    resized = tf.image.resize(tensor, (RESOLTUION, RESOLTUION))
    normalized = resized / 255
    # Prepend a batch axis of size 1: (H, W, C) -> (1, H, W, C).
    return tf.expand_dims(normalized, 0)
# The utility get_seg_overlay() below is adapted from:
# https://github.com/deep-diver/semantic-segmentation-ml-pipeline/blob/main/notebooks/inference_from_SavedModel.ipynb


def get_seg_overlay(image, seg, palette=None):
    """Blend a per-pixel class mask over an image, 50/50.

    Args:
        image: (H, W, 3) float array with values in [0, 1] — the output of
            preprocess_input with the batch axis removed.
        seg: (H, W) integer array of per-pixel class labels.
        palette: optional sequence of [r, g, b] colors indexed by label;
            defaults to the module-level ADE_PALETTE.

    Returns:
        (H, W, 3) uint8 array of the overlaid image.
    """
    colors = np.array(ADE_PALETTE if palette is None else palette)
    color_seg = np.zeros(
        (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
    )  # height, width, 3

    for label, color in enumerate(colors):
        color_seg[seg == label, :] = color

    # Blend image + mask in 0-255 space. The original code averaged the
    # [0, 1] image with the 0-255 mask and THEN multiplied the sum by 255,
    # which scaled the mask term past 255 and clipped it to solid color;
    # scaling the image up front keeps both terms on the same scale.
    img = np.asarray(image) * 255 * 0.5 + color_seg * 0.5

    img = np.clip(img, 0, 255)
    return img.astype(np.uint8)
def run_model(image: Image) -> tf.Tensor:
    """Run the segmentation model and return a (H, W) label-mask tensor."""
    batch = preprocess_input(image)
    logits = MODEL.predict(batch)
    # Per-pixel class = argmax over the channel axis; then drop the batch axis.
    return tf.squeeze(tf.math.argmax(logits, -1))
def get_predictions(image: Image):
    """Gradio handler: segment ``image`` and return the overlay as a PIL image."""
    mask = run_model(image)
    # Re-run preprocessing to recover the resized image the mask was computed on.
    model_input = tf.squeeze(preprocess_input(image), 0)
    overlay = get_seg_overlay(model_input.numpy(), mask.numpy())
    return Image.fromarray(overlay)
title = (
    "Simple demo for a semantic segmentation model trained on the Sidewalks dataset."
)

description = """

Note that the outputs obtained in this demo won't be state-of-the-art. The underlying project has a different objective focusing more on the ops side of
deploying a semantic segmentation model. For more details, check out the repository: https://github.com/deep-diver/semantic-segmentation-ml-pipeline/.

"""

# Wire up the UI: PIL image in, PIL image (mask overlay) out.
# gr.inputs.* has been deprecated since Gradio 3.0 — use the top-level
# component class instead.
demo = gr.Interface(
    get_predictions,
    gr.Image(type="pil"),
    "pil",
    allow_flagging="never",
    title=title,
    description=description,
    examples=[["test-image.jpg"]],
)

demo.launch()
palette.txt ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [0, 0, 0]
2
+ [216, 82, 24]
3
+ [255, 255, 0]
4
+ [125, 46, 141]
5
+ [118, 171, 47]
6
+ [161, 19, 46]
7
+ [255, 0, 0]
8
+ [0, 128, 128]
9
+ [190, 190, 0]
10
+ [0, 255, 0]
11
+ [0, 0, 255]
12
+ [170, 0, 255]
13
+ [84, 84, 0]
14
+ [84, 170, 0]
15
+ [84, 255, 0]
16
+ [170, 84, 0]
17
+ [170, 170, 0]
18
+ [170, 255, 0]
19
+ [255, 84, 0]
20
+ [255, 170, 0]
21
+ [255, 255, 0]
22
+ [33, 138, 200]
23
+ [0, 170, 127]
24
+ [0, 255, 127]
25
+ [84, 0, 127]
26
+ [84, 84, 127]
27
+ [84, 170, 127]
28
+ [84, 255, 127]
29
+ [170, 0, 127]
30
+ [170, 84, 127]
31
+ [170, 170, 127]
32
+ [170, 255, 127]
33
+ [255, 0, 127]
34
+ [255, 84, 127]
35
+ [255, 170, 127]
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ numpy==1.23.0
2
+ pillow==9.1.1
3
+ tensorflow==2.9.0
4
+ huggingface_hub
test-image.jpg ADDED

Git LFS Details

  • SHA256: 337b8ca4bfcfff6dfb6c9cf2b8379b4171880bf7258a272483a42caa77bfe9ae
  • Pointer size: 131 Bytes
  • Size of remote file: 182 kB