JianyuanWang committed
Commit 7d0f4d6
1 Parent(s): 7eda08b
Files changed (1)
app.py +4 -2
app.py CHANGED
@@ -217,7 +217,7 @@ with gr.Blocks() as demo:
             input_images = gr.File(file_count="multiple", label="Input Images", interactive=True)
             num_query_images = gr.Slider(minimum=1, maximum=10, step=1, value=4, label="Number of query images (key frames)",
                                          info="More query images usually lead to better reconstruction at lower speeds. If the viewpoint differences between your images are minimal, you can set this value to 1. ")
-            num_query_points = gr.Slider(minimum=512, maximum=8192, step=1, value=2048, label="Number of query points",
+            num_query_points = gr.Slider(minimum=600, maximum=6000, step=1, value=2048, label="Number of query points",
                                          info="More query points usually lead to denser reconstruction at lower speeds.")
 
         with gr.Column(scale=3):
@@ -232,12 +232,14 @@ with gr.Blocks() as demo:
 
 
     examples = [
-        [british_museum_video, british_museum_images, 2, 4096],
+        [british_museum_video, british_museum_images, 1, 6000],
        [apple_video, apple_images, 5, 2048],
        [bonsai_video, bonsai_images, 3, 2048],
        # [cake_video, cake_images, 3, 2048],
    ]
 
+
+
    gr.Examples(examples=examples,
                inputs=[input_video, input_images, num_query_images, num_query_points],
                outputs=[reconstruction_output, log_output],  # Provide outputs
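
For context, here is a minimal sketch of how the adjusted sliders and the examples list fit together in a Gradio Blocks app. This is not the app's actual code: run_reconstruction, the component types for input_video / reconstruction_output / log_output, and the placeholder example paths are assumptions made for illustration; only the slider parameters and the gr.Examples wiring mirror the diff.

# Minimal sketch of the Gradio pattern used above (assumed helper names and paths).
import gradio as gr

def run_reconstruction(video, images, num_query_images, num_query_points):
    # Placeholder: the real app would run the reconstruction and return a 3D model plus a log.
    return None, f"{num_query_images} key frames, {num_query_points} query points"

with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            input_video = gr.Video(label="Input Video")
            input_images = gr.File(file_count="multiple", label="Input Images", interactive=True)
            num_query_images = gr.Slider(minimum=1, maximum=10, step=1, value=4,
                                         label="Number of query images (key frames)")
            # The commit narrows this range from 512-8192 to 600-6000.
            num_query_points = gr.Slider(minimum=600, maximum=6000, step=1, value=2048,
                                         label="Number of query points")
        with gr.Column(scale=3):
            reconstruction_output = gr.Model3D(label="Reconstruction")
            log_output = gr.Textbox(label="Log")

    # Each example row follows the order of `inputs`:
    # [video, images, num_query_images, num_query_points].
    examples = [
        ["examples/museum.mp4", None, 1, 6000],  # placeholder media paths
        ["examples/apple.mp4", None, 5, 2048],
    ]
    gr.Examples(examples=examples,
                inputs=[input_video, input_images, num_query_images, num_query_points],
                outputs=[reconstruction_output, log_output],
                fn=run_reconstruction)

demo.launch()

Clicking an example row fills the four inputs in one step, which is why the updated british_museum row must use values inside the new slider range (1 key frame, 6000 query points) rather than the old 4096/8192 bounds.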