# -*- coding: utf-8 -*-

import gradio as gr

from matplotlib import gridspec
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from transformers import SegformerFeatureExtractor, TFSegformerForSemanticSegmentation

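# Load the SegFormer (B4) preprocessor and model fine-tuned on Cityscapes
# at 1024x1024 (19 semantic classes).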
feature_extractor = SegformerFeatureExtractor.from_pretrained(
    "nvidia/segformer-b4-finetuned-cityscapes-1024-1024"
)
model = TFSegformerForSemanticSegmentation.from_pretrained(
    "nvidia/segformer-b4-finetuned-cityscapes-1024-1024"
)

def cityscapes_palette():
    """Palette that maps each of the 19 Cityscapes classes to an RGB color."""
    return [
        [0, 0, 0],          # black
        [140, 140, 140],    # gray
        [95, 0, 255],       # purple
        [221, 126, 255],    # light purple
        [1, 0, 255],        # blue
        [0, 216, 255],      # light blue
        [35, 164, 26],      # green
        [29, 219, 22],      # light green
        [255, 228, 0],      # yellow
        [255, 187, 0],      # light orange
        [255, 94, 0],       # orange
        [255, 0, 0],        # red
        [255, 167, 167],    # pink
        [153, 56, 0],       # brown
        [207, 166, 54],     # ochre
        [180, 40, 180],     # magenta
        [120, 56, 123],     # plum
        [45, 56, 28],       # dark olive
        [67, 56, 123],      # dark slate blue
    ]

labels_list = []

# Read the 19 Cityscapes class names, one per line.
with open("labels.txt", "r") as fp:
    for line in fp:
        labels_list.append(line.rstrip("\n"))

colormap = np.asarray(cityscapes_palette())  # (19, 3): row i is the RGB color for class i

def label_to_color_image(label):
    """Map a 2-D array of class ids to an RGB image via the palette."""
    if label.ndim != 2:
        raise ValueError("Expect 2-D input label")

    if np.max(label) >= len(colormap):
        raise ValueError("label value too large.")
    return colormap[label]

def draw_plot(pred_img, seg):
    """Render the blended prediction alongside a legend of the detected classes."""
    fig = plt.figure(figsize=(20, 15))

    # Wide left panel for the image, narrow right panel for the legend.
    grid_spec = gridspec.GridSpec(1, 2, width_ratios=[6, 1])

    plt.subplot(grid_spec[0])
    plt.imshow(pred_img)
    plt.axis('off')
    LABEL_NAMES = np.asarray(labels_list)
    FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
    FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)

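    # Build the legend only from the classes actually present in the mask.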
    unique_labels = np.unique(seg.numpy().astype("uint8"))
    ax = plt.subplot(grid_spec[1])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation="nearest")
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0, labelsize=25)
    return fig

def segment(input_img):
    # `input_img` arrives as a PIL image (gr.Image(type="pil") below), which the
    # feature extractor accepts directly.
    inputs = feature_extractor(images=input_img, return_tensors="tf")
    outputs = model(**inputs)
    logits = outputs.logits  # (batch, num_classes, height/4, width/4)

    logits = tf.transpose(logits, [0, 2, 3, 1])  # channels-last for tf.image.resize
    logits = tf.image.resize(
        logits, input_img.size[::-1]
    )  # `PIL.Image.size` is (width, height); tf.image.resize expects (height, width).
    seg = tf.math.argmax(logits, axis=-1)[0]  # per-pixel class ids

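    # Paint each predicted class with its palette color.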
    color_seg = np.zeros(
        (seg.shape[0], seg.shape[1], 3), dtype=np.uint8
    )  # height, width, 3
    for label, color in enumerate(colormap):
        color_seg[seg.numpy() == label, :] = color

    # Overlay the color mask on the original image at 50% opacity.
    pred_img = np.array(input_img) * 0.5 + color_seg * 0.5
    pred_img = pred_img.astype(np.uint8)

    fig = draw_plot(pred_img, seg)
    return fig


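# Build the UI: sample picker and upload on the left, segmentation plot on the right.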
with gr.Blocks(theme=gr.themes.Monochrome()) as demo:
    with gr.Tab("Semantic Segmentation with Cityscape Image"):
        with gr.Row():
            with gr.Column(scale=1):
                input_image = gr.Image(label="Uploaded Image", interactive=True, type="pil")
                # gr.Examples renders the sample images as a clickable grid and loads
                # the selected file into `input_image` (replaces the original Gallery
                # selector, whose `items=` argument is not a Gallery parameter).
                gr.Examples(
                    examples=["city_1.jpg", "city_2.jpg", "city_3.jpg",
                              "city_4.jpg", "city_5.jpg", "city_6.jpg",
                              "city_7.jpg", "city_8.jpg"],
                    inputs=input_image,
                    label="Select an Image",
                )
                process_button = gr.Button("Process Image")
            with gr.Column(scale=2):
                output_image = gr.Plot(label="Segmented Image")
                process_button.click(segment, inputs=input_image, outputs=output_image)

    with gr.Accordion("Information"):
        gr.Markdown("A Gradio demo that segments a street-scene image into the 19 Cityscapes classes with SegFormer. Pick a sample image or upload your own, then click **Process Image**.")


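# launch() starts a local server (http://127.0.0.1:7860 by default); pass
# share=True for a temporary public link.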
demo.launch()