Spaces:
Running
Running
ondrejbiza
committed on
Commit
•
9b6ff29
1
Parent(s):
9d5d768
V2
Browse files
app.py
CHANGED
@@ -89,29 +89,39 @@ probs = np.zeros((11, 128, 128), dtype=np.float32)
|
|
89 |
|
90 |
with gr.Blocks() as demo:
|
91 |
|
|
|
|
|
|
|
|
|
|
|
92 |
with gr.Row():
|
93 |
|
94 |
-
with gr.Column():
|
95 |
gr_choose_image = gr.Dropdown(
|
96 |
[f"img{i}" for i in range(1, 9)], label="CLEVR Image", info="Start by a picking an image from the CLEVR dataset."
|
97 |
)
|
98 |
-
|
99 |
-
|
100 |
|
101 |
with gr.Column():
|
102 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
103 |
|
104 |
gr_y_slider = gr.Slider(-1, 1, value=0, step=0.01, label="x")
|
105 |
gr_x_slider = gr.Slider(-1, 1, value=0, step=0.01, label="y")
|
106 |
gr_sy_slider = gr.Slider(0.01, 1, value=0.1, step=0.01, label="width")
|
107 |
gr_sx_slider = gr.Slider(0.01, 1, value=0.1, step=0.01, label="height")
|
108 |
|
109 |
-
gr_button = gr.Button("Render")
|
110 |
|
111 |
def update_image_and_segmentation(name, idx):
|
112 |
idx = idx - 1
|
113 |
|
114 |
-
img_input,
|
115 |
out = model.apply(
|
116 |
{"params": state.params, **state.variables},
|
117 |
video=img_input[None, None],
|
@@ -119,12 +129,15 @@ with gr.Blocks() as demo:
|
|
119 |
train=False)
|
120 |
|
121 |
probs[:] = nn.softmax(out["outputs"]["segmentation_logits"][0, 0, :, :, :, 0], axis=0)
|
|
|
|
|
|
|
122 |
slots_ = out["states"]
|
123 |
slots[:] = slots_[0, 0, :, :-4]
|
124 |
pos[:] = slots_[0, 0, :, -4: -2]
|
125 |
scale[:] = slots_[0, 0, :, -2:]
|
126 |
|
127 |
-
return img, (probs[idx] * 255).astype(np.uint8), float(pos[idx, 0]), \
|
128 |
float(pos[idx, 1]), float(scale[idx, 0]), float(scale[idx, 1])
|
129 |
|
130 |
gr_choose_image.change(
|
|
|
89 |
|
90 |
with gr.Blocks() as demo:
|
91 |
|
92 |
+
# work in progress
|
93 |
+
# with gr.Row():
|
94 |
+
# gr_gallery = gr.Gallery(value=[f"images/img{i}.png" for i in range(1, 9)])
|
95 |
+
# gr_gallery = gr_gallery.style(columns=[3], rows=[3], object_fit="contain", height="auto")
|
96 |
+
|
97 |
with gr.Row():
|
98 |
|
|
|
99 |
gr_choose_image = gr.Dropdown(
|
100 |
[f"img{i}" for i in range(1, 9)], label="CLEVR Image", info="Start by a picking an image from the CLEVR dataset."
|
101 |
)
|
102 |
+
|
103 |
+
with gr.Row():
|
104 |
|
105 |
with gr.Column():
|
106 |
+
|
107 |
+
gr_image_1 = gr.Image(type="numpy", source="canvas", label="Decoding")
|
108 |
+
gr_image_2 = gr.Image(type="numpy", source="canvas", label="Segmentation")
|
109 |
+
|
110 |
+
with gr.Column():
|
111 |
+
gr_slot_slider = gr.Slider(1, 11, value=1, step=1, label="Slot Index",
|
112 |
+
info="Change slot index too see the segmentation mask, position and scale of each slot.")
|
113 |
|
114 |
gr_y_slider = gr.Slider(-1, 1, value=0, step=0.01, label="x")
|
115 |
gr_x_slider = gr.Slider(-1, 1, value=0, step=0.01, label="y")
|
116 |
gr_sy_slider = gr.Slider(0.01, 1, value=0.1, step=0.01, label="width")
|
117 |
gr_sx_slider = gr.Slider(0.01, 1, value=0.1, step=0.01, label="height")
|
118 |
|
119 |
+
gr_button = gr.Button("Render", info="Render a new image with altered positions and scales.")
|
120 |
|
121 |
def update_image_and_segmentation(name, idx):
|
122 |
idx = idx - 1
|
123 |
|
124 |
+
img_input, _ = load_image(name)
|
125 |
out = model.apply(
|
126 |
{"params": state.params, **state.variables},
|
127 |
video=img_input[None, None],
|
|
|
129 |
train=False)
|
130 |
|
131 |
probs[:] = nn.softmax(out["outputs"]["segmentation_logits"][0, 0, :, :, :, 0], axis=0)
|
132 |
+
img = np.array(out["outputs"]["video"][0, 0])
|
133 |
+
img = np.clip(img, 0, 1)
|
134 |
+
|
135 |
slots_ = out["states"]
|
136 |
slots[:] = slots_[0, 0, :, :-4]
|
137 |
pos[:] = slots_[0, 0, :, -4: -2]
|
138 |
scale[:] = slots_[0, 0, :, -2:]
|
139 |
|
140 |
+
return (img * 255).astype(np.uint8), (probs[idx] * 255).astype(np.uint8), float(pos[idx, 0]), \
|
141 |
float(pos[idx, 1]), float(scale[idx, 0]), float(scale[idx, 1])
|
142 |
|
143 |
gr_choose_image.change(
|