amankishore committed
Commit
4bd20e7
1 Parent(s): 10528ca

Gradio Working!

Files changed (1)
  1. app.py +39 -53
app.py CHANGED
@@ -30,17 +30,17 @@ def vis_routine(y, depth):

with gr.Blocks(css=".gradio-container {max-width: 512px; margin: auto;}") as demo:
    # title
-    gr.Markdown('[Score Jacobian Chaining](https://github.com/pals-ttic/sjc) Lifting Pretrained 2D Diffusion Models for 3D Generation')
+    gr.Markdown('[Score Jacobian Chaining](https://github.com/pals-ttic/sjc): Lifting Pretrained 2D Diffusion Models for 3D Generation')

    # inputs
    prompt = gr.Textbox(label="Prompt", max_lines=1, value="A high quality photo of a delicious burger")
-    iters = gr.Slider(label="Iters", minimum=1000, maximum=20000, value=10000, step=100)
+    iters = gr.Slider(label="Iters", minimum=100, maximum=20000, value=10000, step=100)
    seed = gr.Slider(label="Seed", minimum=0, maximum=2147483647, step=1, randomize=True)
    button = gr.Button('Generate')

    # outputs
    image = gr.Image(label="image", visible=True)
-    depth = gr.Image(label="depth", visible=True)
+    # depth = gr.Image(label="depth", visible=True)
    video = gr.Video(label="video", visible=False)
    logs = gr.Textbox(label="logging")

@@ -141,78 +141,64 @@ with gr.Blocks(css=".gradio-container {max-width: 512px; margin: auto;}") as dem
            if isinstance(model, StableDiffusion):
                y = model.decode(y)
            pane, img, depth = vis_routine(y, depth)
+            yield {
+                image: gr.update(value=img, visible=True),
+                video: gr.update(visible=False),
+                logs: f"Steps: {i}/{n_steps}: \n" + str(tsr_stats(y)),
+            }

            # TODO: Output pane, img and depth to Gradio

            pbar.update()
            pbar.set_description(p)

-            yield {
-                image: gr.update(value=img, visible=True),
-                depth: gr.update(value=depth, visible=True),
-                video: gr.update(visible=False),
-                logs: str(tsr_stats(y)),
-            }
-
        # TODO: Save Checkpoint
-        ckpt = vox.state_dict()
-        H, W = poser.H, poser.W
-        vox.eval()
-        K, poses = poser.sample_test(100)
+        with torch.no_grad():
+            ckpt = vox.state_dict()
+            H, W = poser.H, poser.W
+            vox.eval()
+            K, poses = poser.sample_test(100)

-        aabb = vox.aabb.T.cpu().numpy()
-        vox = vox.to(device_glb)
+            aabb = vox.aabb.T.cpu().numpy()
+            vox = vox.to(device_glb)

-        num_imgs = len(poses)
+            num_imgs = len(poses)

-        for i in (pbar := tqdm(range(num_imgs))):
+            all_images = []

-            pose = poses[i]
-            y, depth = render_one_view(vox, aabb, H, W, K, pose)
-            if isinstance(model, StableDiffusion):
-                y = model.decode(y)
-            pane, img, depth = vis_routine(y, depth)
+            for i in (pbar := tqdm(range(num_imgs))):

-            # Save img to output
-            img.save(f"output/{i}.png")
+                pose = poses[i]
+                y, depth = render_one_view(vox, aabb, H, W, K, pose)
+                if isinstance(model, StableDiffusion):
+                    y = model.decode(y)
+                pane, img, depth = vis_routine(y, depth)

-            yield {
-                image: gr.update(value=img, visible=True),
-                depth: gr.update(value=depth, visible=True),
-                video: gr.update(visible=False),
-                logs: str(tsr_stats(y)),
-            }
-
-        output_video = "view_seq.mp4"
+                # Save img to output
+                all_images.append(img)

-        def export_movie(seqs, fname, fps=30):
-            fname = Path(fname)
-            if fname.suffix == "":
-                fname = fname.with_suffix(".mp4")
-            writer = imageio.get_writer(fname, fps=fps)
-            for img in seqs:
-                writer.append_data(img)
-            writer.close()
+                yield {
+                    image: gr.update(value=img, visible=True),
+                    video: gr.update(visible=False),
+                    logs: str(tsr_stats(y)),
+                }

-        def stitch_vis(save_fn, img_fnames, fps=10):
-            figs = [imageio.imread(fn) for fn in img_fnames]
-            export_movie(figs, save_fn, fps)
+            output_video = "/tmp/tmp.mp4"

-        stitch_vis(output_video, [f"output/{i}.png" for i in range(num_imgs)])
+            imageio.mimwrite(output_video, all_images, quality=8, fps=10)

-        end_t = time.time()
+            end_t = time.time()

-        yield {
-            image: gr.update(value=img, visible=False),
-            depth: gr.update(value=depth, visible=False),
-            video: gr.update(value=output_video, visible=True),
-            logs: f"Generation Finished in {(end_t - start_t)/ 60:.4f} minutes!",
-        }
+            yield {
+                image: gr.update(value=img, visible=False),
+                video: gr.update(value=output_video, visible=True),
+                logs: f"Generation Finished in {(end_t - start_t)/ 60:.4f} minutes!",
+            }

    button.click(
        submit,
        [prompt, iters, seed],
-        [image, depth, video, logs]
+        [image, video, logs]
    )

    # concurrency_count: only allow ONE running progress, else GPU will OOM.
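
The core of this change is Gradio's generator-handler pattern: `submit` is a Python generator, and every `yield` of a `{component: gr.update(...)}` dict pushes a partial UI update to the browser while training keeps running. Below is a minimal, standalone sketch of that pattern; it is not the app's actual `submit` (the three-step loop and `time.sleep` are placeholders), and it assumes the Gradio 3.x API that the `concurrency_count` comment in app.py implies.

```python
import time
import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    button = gr.Button("Generate")
    image = gr.Image(label="image", visible=True)
    video = gr.Video(label="video", visible=False)
    logs = gr.Textbox(label="logging")

    def submit(prompt_text):
        # Generator handler: each yield sends a partial update keyed by component.
        for i in range(3):                       # placeholder for the training loop
            time.sleep(1)                        # placeholder for one optimization step
            yield {
                image: gr.update(visible=True),  # keep streaming the preview image
                video: gr.update(visible=False),
                logs: f"step {i}: optimizing for '{prompt_text}'",
            }
        # Final yield: swap the preview image out for the finished video slot.
        yield {
            image: gr.update(visible=False),
            video: gr.update(visible=True),
            logs: "done",
        }

    # The outputs list must cover every component the generator yields.
    button.click(submit, [prompt], [image, video, logs])

demo.queue(concurrency_count=1)  # one running job at a time, as the app.py comment notes
demo.launch()
```

Generator handlers stream through the queue, so `demo.queue()` must be enabled; `concurrency_count=1` keeps a single job on the GPU, which is what the closing comment in app.py refers to.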
 
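The diff also drops the hand-rolled `export_movie`/`stitch_vis` helpers, which saved every rendered view to `output/{i}.png` and re-read the files, in favor of keeping the frames in memory and writing the video with a single `imageio.mimwrite` call. A small sketch of that call, using synthetic NumPy frames (the real code passes the images collected from `vis_routine`) and assuming the `imageio-ffmpeg` plugin is installed so the `fps` and `quality` arguments reach the MP4 writer:

```python
import numpy as np
import imageio

# Synthetic stand-in frames: 24 random 64x64 RGB images.
frames = [np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8) for _ in range(24)]

# Write every frame to one MP4 in a single call, mirroring the new app.py line:
#   imageio.mimwrite(output_video, all_images, quality=8, fps=10)
imageio.mimwrite("/tmp/tmp.mp4", frames, fps=10, quality=8)
```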