Spaces: Build error
amankishore committed · Commit 4bd20e7 · Parent(s): 10528ca

Gradio Working!
app.py CHANGED
@@ -30,17 +30,17 @@ def vis_routine(y, depth):
 
 with gr.Blocks(css=".gradio-container {max-width: 512px; margin: auto;}") as demo:
     # title
-    gr.Markdown('[Score Jacobian Chaining](https://github.com/pals-ttic/sjc) Lifting Pretrained 2D Diffusion Models for 3D Generation')
+    gr.Markdown('[Score Jacobian Chaining](https://github.com/pals-ttic/sjc): Lifting Pretrained 2D Diffusion Models for 3D Generation')
 
     # inputs
     prompt = gr.Textbox(label="Prompt", max_lines=1, value="A high quality photo of a delicious burger")
-    iters = gr.Slider(label="Iters", minimum=
+    iters = gr.Slider(label="Iters", minimum=100, maximum=20000, value=10000, step=100)
     seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
     button = gr.Button('Generate')
 
     # outputs
     image = gr.Image(label="image", visible=True)
-    depth = gr.Image(label="depth", visible=True)
+    # depth = gr.Image(label="depth", visible=True)
     video = gr.Video(label="video", visible=False)
     logs = gr.Textbox(label="logging")
 
@@ -141,78 +141,64 @@ with gr.Blocks(css=".gradio-container {max-width: 512px; margin: auto;}") as demo:
 if isinstance(model, StableDiffusion):
     y = model.decode(y)
 pane, img, depth = vis_routine(y, depth)
+yield {
+    image: gr.update(value=img, visible=True),
+    video: gr.update(visible=False),
+    logs: f"Steps: {i}/{n_steps}: \n" + str(tsr_stats(y)),
+}
 
 # TODO: Output pane, img and depth to Gradio
 
 pbar.update()
 pbar.set_description(p)
 
-yield {
-    image: gr.update(value=img, visible=True),
-    depth: gr.update(value=depth, visible=True),
-    video: gr.update(visible=False),
-    logs: str(tsr_stats(y)),
-}
-
 # TODO: Save Checkpoint
-
-
-
-
+with torch.no_grad():
+    ckpt = vox.state_dict()
+    H, W = poser.H, poser.W
+    vox.eval()
+    K, poses = poser.sample_test(100)
 
-
-
+    aabb = vox.aabb.T.cpu().numpy()
+    vox = vox.to(device_glb)
 
-
+    num_imgs = len(poses)
 
-
+    all_images = []
 
-
-y, depth = render_one_view(vox, aabb, H, W, K, pose)
-if isinstance(model, StableDiffusion):
-    y = model.decode(y)
-pane, img, depth = vis_routine(y, depth)
+    for i in (pbar := tqdm(range(num_imgs))):
 
-
-
+        pose = poses[i]
+        y, depth = render_one_view(vox, aabb, H, W, K, pose)
+        if isinstance(model, StableDiffusion):
+            y = model.decode(y)
+        pane, img, depth = vis_routine(y, depth)
 
-
-
-    depth: gr.update(value=depth, visible=True),
-    video: gr.update(visible=False),
-    logs: str(tsr_stats(y)),
-}
-
-output_video = "view_seq.mp4"
+        # Save img to output
+        all_images.append(img)
 
-
-
-
-
-
-for img in seqs:
-    writer.append_data(img)
-writer.close()
+        yield {
+            image: gr.update(value=img, visible=True),
+            video: gr.update(visible=False),
+            logs: str(tsr_stats(y)),
+        }
 
-
-figs = [imageio.imread(fn) for fn in img_fnames]
-export_movie(figs, save_fn, fps)
+    output_video = "/tmp/tmp.mp4"
 
-
+    imageio.mimwrite(output_video, all_images, quality=8, fps=10)
 
-
+    end_t = time.time()
 
-
-
-
-
-
-}
+    yield {
+        image: gr.update(value=img, visible=False),
+        video: gr.update(value=output_video, visible=True),
+        logs: f"Generation Finished in {(end_t - start_t)/ 60:.4f} minutes!",
+    }
 
 button.click(
     submit,
     [prompt, iters, seed],
-    [image,
+    [image, video, logs]
 )
 
 # concurrency_count: only allow ONE running progress, else GPU will OOM.