tokenid committed
Commit a40f82d • 1 Parent(s): 0e18ab2

lazy cache

Files changed (2)
  1. .gitignore +2 -1
  2. app.py +26 -9
.gitignore CHANGED
@@ -1 +1,2 @@
-__pycache__
+__pycache__
+gradio_cached_examples
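
Note: with example caching enabled, Gradio materializes each example's outputs on disk under ./gradio_cached_examples/ in the working directory, which is what the new ignore rule covers. A minimal sketch of that behaviour (illustrative names, assuming a Gradio 4.x release where cache_examples='lazy' is available):

import gradio as gr

def echo(text):
    # toy function standing in for the real demo pipeline
    return text.upper()

with gr.Blocks() as demo:
    inp = gr.Textbox(label='Input')
    out = gr.Textbox(label='Output')
    gr.Examples(
        examples=[['hello'], ['world']],
        inputs=[inp],
        fn=echo,
        outputs=[out],
        # outputs are computed on first click and stored under ./gradio_cached_examples/
        cache_examples='lazy',
    )

if __name__ == '__main__':
    demo.launch()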
app.py CHANGED
@@ -18,6 +18,7 @@ from src.pose_estimation import load_model_from_config, estimate_poses, estimate
 from src.pose_funcs import find_optimal_poses
 from src.utils import spherical_to_cartesian, elu_to_c2w
 
+
 if torch.cuda.is_available():
     _device_ = 'cuda:0'
 else:
@@ -240,6 +241,15 @@ def run_pose_refinement(cam_vis, image1, image2, anchor_polar, explored_sph, ref
     return final_sph, fig
 
 
+def run_example(cam_vis, image1, image2):
+
+    image1, image2 = run_preprocess(image1, image2, True, 0)
+    anchor_polar, explored_sph, fig, _ = run_pose_exploration(cam_vis, image1, image2, 16, 4, 10, 0)
+
+    return image1, image2, anchor_polar, explored_sph, fig
+
+
+
 _HEADER_ = '''
 # Official 🤗 Gradio Demo for [ID-Pose: Sparse-view Camera Pose Estimation By Inverting Diffusion Models](https://github.com/xt4d/id-pose)
 - ID-Pose accepts input images with NO overlapping appearance.
@@ -267,6 +277,13 @@ def run_demo():
     demo = gr.Blocks(title='ID-Pose: Sparse-view Camera Pose Estimation By Inverting Diffusion Models')
 
     with demo:
+
+        cam_vis = CameraVisualizer([np.eye(4), np.eye(4)], ['Image 1', 'Image 2'], ['red', 'blue'])
+
+        explored_sph = gr.State()
+        anchor_polar = gr.State()
+        refined_sph = gr.State()
+
         gr.Markdown(_HEADER_)
 
         with gr.Row(variant='panel'):
@@ -327,8 +344,10 @@ def run_demo():
                     ['data/gradio_demo/circo_0.png', 'data/gradio_demo/circo_1.png'],
                 ],
                 inputs=[input_image1, input_image2],
+                fn=partial(run_example, cam_vis),
+                outputs=[processed_image1, processed_image2, anchor_polar, explored_sph, vis_output],
                 label='Examples (Captured)',
-                cache_examples=False,
+                cache_examples='lazy',
                 examples_per_page=5
             )
 
@@ -342,8 +361,10 @@ def run_demo():
                     ['data/gradio_demo/christ_0.png', 'data/gradio_demo/christ_1.png'],
                 ],
                 inputs=[input_image1, input_image2],
+                fn=partial(run_example, cam_vis),
+                outputs=[processed_image1, processed_image2, anchor_polar, explored_sph, vis_output],
                 label='Examples (Internet)',
-                cache_examples=False,
+                cache_examples='lazy',
                 examples_per_page=5
             )
 
@@ -357,17 +378,13 @@ def run_demo():
                     ['data/gradio_demo/ride_horse_0.png', 'data/gradio_demo/ride_horse_1.png'],
                 ],
                 inputs=[input_image1, input_image2],
+                fn=partial(run_example, cam_vis),
+                outputs=[processed_image1, processed_image2, anchor_polar, explored_sph, vis_output],
                 label='Examples (Generated)',
-                cache_examples=False,
+                cache_examples='lazy',
                 examples_per_page=5
             )
 
-        cam_vis = CameraVisualizer([np.eye(4), np.eye(4)], ['Image 1', 'Image 2'], ['red', 'blue'])
-
-        explored_sph = gr.State()
-        anchor_polar = gr.State()
-        refined_sph = gr.State()
-
         run_btn.click(
            fn=run_preprocess,
            inputs=[input_image1, input_image2, preprocess_chk, seed_value],
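
For reference, a minimal sketch of the wiring pattern the new gr.Examples calls rely on: functools.partial pre-binds the non-component argument (the camera visualizer) so that the example function's remaining parameters line up with `inputs` and its return values line up with `outputs`, including gr.State slots, exactly as run_example does above. The component names and toy function below are illustrative, assuming a Gradio 4.x release that accepts cache_examples='lazy':

from functools import partial

import gradio as gr

class DummyVisualizer:
    # stand-in for the real CameraVisualizer; not part of the app
    def describe(self, a, b):
        return f'{a} vs {b}'

def run_pair(vis, image1, image2):
    # `vis` is pre-bound via partial(); Gradio only supplies the values of `inputs`
    return vis.describe(image1, image2), [image1, image2]

with gr.Blocks() as demo:
    vis = DummyVisualizer()
    img1 = gr.Textbox(label='Image 1')
    img2 = gr.Textbox(label='Image 2')
    summary = gr.Textbox(label='Summary')
    pair_state = gr.State()  # cached example outputs can also fill State slots, as the demo above does
    gr.Examples(
        examples=[['a.png', 'b.png']],
        inputs=[img1, img2],
        fn=partial(run_pair, vis),   # bind the visualizer; img1/img2 fill the remaining parameters
        outputs=[summary, pair_state],
        cache_examples='lazy',
    )

if __name__ == '__main__':
    demo.launch()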