toshas committed
Commit c7adb54 • 1 Parent(s): 5f60fb8

upgrade diffusers to use core marigold pipelines

change video tab to use vae-tiny and relax the time and fps limits
change 3d tab to store in obj format and zip up everything
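
For reviewers, the core-diffusers usage this commit migrates to boils down to the sketch below (distilled from the app.py diff further down; diffusers >= 0.28, a CUDA device, and the sample image URL from the diffusers Marigold docs are assumptions, not part of this commit):

```python
# Minimal sketch of the new core Marigold depth pipeline usage.
import torch
from diffusers import MarigoldDepthPipeline
from diffusers.utils import load_image

pipe = MarigoldDepthPipeline.from_pretrained(
    "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
).to("cuda")

image = load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
generator = torch.Generator(device=pipe.device).manual_seed(2024)

# LCM checkpoint: 1-4 steps suffice; ensemble_size=1 keeps it single-pass.
out = pipe(image, num_inference_steps=4, ensemble_size=1, generator=generator)

# prediction has shape (N, H, W, 1); the bundled image processor handles
# colorized visualization and 16-bit PNG export, replacing the manual
# numpy conversions the old custom pipeline required.
vis = pipe.image_processor.visualize_depth(out.prediction)[0]
png16 = pipe.image_processor.export_depth_to_16bit_png(out.prediction)[0]
vis.save("depth_colored.png")
png16.save("depth_16bit.png")
```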

Files changed (33)
  1. README.md +3 -3
  2. app.py +174 -138
  3. extrude.py +53 -7
  4. gradio_cached_examples/examples_bas/3D model outputs high-res/{f4a69712004b188f4bb0/food_depth_512.glb → 0f57994f5d6ac12c1020/food_depth_512.glb.zip} +2 -2
  5. gradio_cached_examples/examples_bas/3D model outputs high-res/0fbd5fe3c8b8dd4f05d1/einstein_depth_512.stl +0 -3
  6. gradio_cached_examples/examples_bas/3D model outputs high-res/127d9bcaf03fa5f41dd3/food_depth_512.stl.zip +3 -0
  7. gradio_cached_examples/examples_bas/3D model outputs high-res/49a7818bed0f17ce01cf/food_depth_512.stl +0 -3
  8. gradio_cached_examples/examples_bas/3D model outputs high-res/96a98e08d96fd47e5cc6/einstein_depth_512.obj.zip +3 -0
  9. gradio_cached_examples/examples_bas/3D model outputs high-res/a17995f3d4750a0e0bbc/food_depth_512.obj.zip +3 -0
  10. gradio_cached_examples/examples_bas/3D model outputs high-res/b0b93bdcbedf077307ba/coin_depth_512.stl.zip +3 -0
  11. gradio_cached_examples/examples_bas/3D model outputs high-res/b69b15d737b2de6938a5/einstein_depth_512.glb +0 -3
  12. gradio_cached_examples/examples_bas/3D model outputs high-res/c0b00e36b9db31f4e9fd/coin_depth_512.stl +0 -3
  13. gradio_cached_examples/examples_bas/3D model outputs high-res/c7499e9097e58b706e51/einstein_depth_512.glb.zip +3 -0
  14. gradio_cached_examples/examples_bas/3D model outputs high-res/ebe8a8d03fbc1a1fc2bd/coin_depth_512.glb.zip +3 -0
  15. gradio_cached_examples/examples_bas/3D model outputs high-res/ee9ee048f590c0c9a2c8/einstein_depth_512.stl.zip +3 -0
  16. gradio_cached_examples/examples_bas/3D model outputs high-res/ef353a417723b89274d9/coin_depth_512.glb +0 -3
  17. gradio_cached_examples/examples_bas/3D model outputs high-res/fbaa26ffc2eb3654c177/coin_depth_512.obj.zip +3 -0
  18. gradio_cached_examples/examples_bas/3D preview low-res relief highlight/{adb58f0f05db6046581f → 78ff2a583036eab8fe9b}/coin_depth_256.glb +2 -2
  19. gradio_cached_examples/examples_bas/3D preview low-res relief highlight/{393dc8309ae2f31f47e9 → 8feb5fe1e8941c880c40}/food_depth_256.glb +2 -2
  20. gradio_cached_examples/examples_bas/3D preview low-res relief highlight/{d2ca1f18e3da0b06142e → bb26fd8a9d7890806329}/einstein_depth_256.glb +2 -2
  21. gradio_cached_examples/examples_bas/log.csv +3 -3
  22. gradio_cached_examples/examples_video/Depth outputs/{207a3fa61126cf3d1981 → 41ffc6734af27742f841}/obama_depth_16bit.zip +2 -2
  23. gradio_cached_examples/examples_video/Depth outputs/{93ea09fe13fa3ed86f2d → 4d0f30c84219f7e28c5b}/cab_depth_16bit.zip +2 -2
  24. gradio_cached_examples/examples_video/{Output video depth red-near blue-far/a9ebfdb1e7b3856d929a → Depth outputs/7e50f61bc97230e10bc9}/cab_depth_colored.mp4 +2 -2
  25. gradio_cached_examples/examples_video/Depth outputs/{0467a73b0d8d3ade99a7 → abdbb71b14bbbd9e179e}/elephant_depth_colored.mp4 +2 -2
  26. gradio_cached_examples/examples_video/Depth outputs/{93d8ed44918458cb56b7 → c3260a8a07bb43e647b8}/elephant_depth_16bit.zip +2 -2
  27. gradio_cached_examples/examples_video/Depth outputs/{df8f2c089d19ebdb0eef → ffbf959599ab71f64cb8}/obama_depth_colored.mp4 +2 -2
  28. gradio_cached_examples/examples_video/Output video depth red-near blue-far/{2301d8bcf6db4f0588c7 → 575c12e3b6ef94b4620e}/obama_depth_colored.mp4 +2 -2
  29. gradio_cached_examples/examples_video/{Depth outputs/c12711ccd5a2c49cf18c → Output video depth red-near blue-far/6002bb1e4b17b4f06de1}/cab_depth_colored.mp4 +2 -2
  30. gradio_cached_examples/examples_video/Output video depth red-near blue-far/{89a2cce251acce733da7 → d0eb4f4347977106f1f2}/elephant_depth_colored.mp4 +2 -2
  31. gradio_cached_examples/examples_video/log.csv +3 -3
  32. requirements.txt +69 -64
  33. requirements_min.txt +5 -5
README.md CHANGED
@@ -4,19 +4,19 @@ emoji: 🏡️
  colorFrom: blue
  colorTo: red
  sdk: gradio
- sdk_version: 4.21.0
+ sdk_version: 4.32.2
  app_file: app.py
  pinned: true
  license: cc-by-sa-4.0
  models:
- - prs-eth/marigold-lcm-v1-0
+ - prs-eth/marigold-depth-lcm-v1-0
  hf_oauth: true
  hf_oauth_expiration_minutes: 43200
  ---

  This is a demo of Marigold-LCM, the state-of-the-art depth estimator for images in the wild.
  It combines the power of the original Marigold 10-step estimator and the Latent Consistency Models, delivering high-quality results in as little as one step.
- Find out more in our CVPR 2024 paper titled ["Repurposing Diffusion-Based Image Generators for Monocular Depth Estimation"](https://arxiv.org/abs/2312.02145)
+ Find out more in our CVPR 2024 Oral paper titled ["Repurposing Diffusion-Based Image Generators for Monocular Depth Estimation"](https://arxiv.org/abs/2312.02145)

  ```
  @InProceedings{ke2023repurposing,
app.py CHANGED
@@ -25,12 +25,14 @@ import warnings
  import zipfile
  from io import BytesIO

+ import diffusers
  import gradio as gr
  import imageio as imageio
  import numpy as np
  import spaces
  import torch as torch
  from PIL import Image
+ from diffusers import MarigoldDepthPipeline
  from gradio_imageslider import ImageSlider
  from huggingface_hub import login
  from tqdm import tqdm
@@ -38,32 +40,31 @@ from tqdm import tqdm
  from extrude import extrude_depth_3d
  from gradio_patches.examples import Examples
  from gradio_patches.flagging import FlagMethod, HuggingFaceDatasetSaver
- from marigold_depth_estimation_lcm import MarigoldDepthConsistencyPipeline

  warnings.filterwarnings(
      "ignore", message=".*LoginButton created outside of a Blocks context.*"
  )

  default_seed = 2024
+ default_batch_size = 4

- default_image_denoise_steps = 4
+ default_image_num_inference_steps = 4
  default_image_ensemble_size = 1
- default_image_processing_res = 768
+ default_image_processing_resolution = 768
  default_image_reproducuble = True

  default_video_depth_latent_init_strength = 0.1
- default_video_denoise_steps = 1
+ default_video_num_inference_steps = 1
  default_video_ensemble_size = 1
- default_video_processing_res = 768
- default_video_out_fps = 15
- default_video_out_max_frames = 100
+ default_video_processing_resolution = 768
+ default_video_out_max_frames = 450

  default_bas_plane_near = 0.0
  default_bas_plane_far = 1.0
  default_bas_embossing = 20
- default_bas_denoise_steps = 4
+ default_bas_num_inference_steps = 4
  default_bas_ensemble_size = 1
- default_bas_processing_res = 768
+ default_bas_processing_resolution = 768
  default_bas_size_longest_px = 512
  default_bas_size_longest_cm = 10
  default_bas_filter_size = 3
@@ -85,9 +86,9 @@ def process_image_check(path_input):
  def process_image(
      pipe,
      path_input,
-     denoise_steps=default_image_denoise_steps,
+     num_inference_steps=default_image_num_inference_steps,
      ensemble_size=default_image_ensemble_size,
-     processing_res=default_image_processing_res,
+     processing_resolution=default_image_processing_resolution,
  ):
      name_base, name_ext = os.path.splitext(os.path.basename(path_input))
      print(f"Processing image {name_base}{name_ext}")
@@ -99,22 +100,23 @@ def process_image(

      input_image = Image.open(path_input)

+     generator = torch.Generator(device=pipe.device).manual_seed(default_seed)
+
      pipe_out = pipe(
          input_image,
-         denoising_steps=denoise_steps,
+         num_inference_steps=num_inference_steps,
          ensemble_size=ensemble_size,
-         processing_res=processing_res,
-         batch_size=1 if processing_res == 0 else 0,
-         seed=default_seed,
-         show_progress_bar=False,
+         processing_resolution=processing_resolution,
+         batch_size=1 if processing_resolution == 0 else default_batch_size,
+         generator=generator,
      )

-     depth_pred = pipe_out.depth_np
-     depth_colored = pipe_out.depth_colored
-     depth_16bit = (depth_pred * 65535.0).astype(np.uint16)
+     depth_pred = pipe_out.prediction[0, :, :, 0]
+     depth_colored = pipe.image_processor.visualize_depth(pipe_out.prediction)[0]
+     depth_16bit = pipe.image_processor.export_depth_to_16bit_png(pipe_out.prediction)[0]

      np.save(path_out_fp32, depth_pred)
-     Image.fromarray(depth_16bit).save(path_out_16bit, mode="I;16")
+     depth_16bit.save(path_out_16bit)
      depth_colored.save(path_out_vis)

      return (
@@ -127,10 +129,9 @@ def process_video(
      pipe,
      path_input,
      depth_latent_init_strength=default_video_depth_latent_init_strength,
-     denoise_steps=default_video_denoise_steps,
+     num_inference_steps=default_video_num_inference_steps,
      ensemble_size=default_video_ensemble_size,
-     processing_res=default_video_processing_res,
-     out_fps=default_video_out_fps,
+     processing_resolution=default_video_processing_resolution,
      out_max_frames=default_video_out_max_frames,
      progress=gr.Progress(),
  ):
@@ -146,79 +147,105 @@ def process_video(
      path_out_vis = os.path.join(path_output_dir, f"{name_base}_depth_colored.mp4")
      path_out_16bit = os.path.join(path_output_dir, f"{name_base}_depth_16bit.zip")

-     reader = imageio.get_reader(path_input)
-
-     meta_data = reader.get_meta_data()
-     fps = meta_data["fps"]
-     size = meta_data["size"]
-     duration_sec = meta_data["duration"]
-
-     if fps <= out_fps:
-         frame_interval, out_fps = 1, fps
-     else:
-         frame_interval = round(fps / out_fps)
-         out_fps = fps / frame_interval
-
-     out_duration_sec = out_max_frames / out_fps
-     if duration_sec > out_duration_sec:
-         gr.Warning(
-             f"Only the first ~{int(out_duration_sec)} seconds will be processed; "
-             f"use alternative setups for full processing"
-         )
-
-     writer = imageio.get_writer(path_out_vis, fps=out_fps)
-     zipf = zipfile.ZipFile(path_out_16bit, "w", zipfile.ZIP_DEFLATED)
-     prev_depth_latent = None
-
-     pbar = tqdm(desc="Processing Video", total=out_max_frames)
-
-     out_frame_id = 0
-     for frame_id, frame in enumerate(reader):
-         if not (frame_id % frame_interval == 0):
-             continue
-         out_frame_id += 1
-         pbar.update(1)
-         if out_frame_id > out_max_frames:
-             break
-
-         frame_pil = Image.fromarray(frame)
-
-         pipe_out = pipe(
-             frame_pil,
-             denoising_steps=denoise_steps,
-             ensemble_size=ensemble_size,
-             processing_res=processing_res,
-             match_input_res=False,
-             batch_size=0,
-             depth_latent_init=prev_depth_latent,
-             depth_latent_init_strength=depth_latent_init_strength,
-             return_depth_latent=True,
-             seed=default_seed,
-             show_progress_bar=False,
-         )
-
-         prev_depth_latent = pipe_out.depth_latent
-
-         processed_frame = pipe_out.depth_colored
-         processed_frame = imageio.core.util.Array(np.array(processed_frame))
-         writer.append_data(processed_frame)
-
-         processed_frame = (65535 * np.clip(pipe_out.depth_np, 0.0, 1.0)).astype(
-             np.uint16
-         )
-         processed_frame = Image.fromarray(processed_frame, mode="I;16")
-
-         archive_path = os.path.join(
-             f"{name_base}_depth_16bit", f"{out_frame_id:05d}.png"
-         )
-         img_byte_arr = BytesIO()
-         processed_frame.save(img_byte_arr, format="png")
-         img_byte_arr.seek(0)
-         zipf.writestr(archive_path, img_byte_arr.read())
-
-     reader.close()
-     writer.close()
-     zipf.close()
+     generator = torch.Generator(device=pipe.device).manual_seed(default_seed)
+
+     reader, writer, zipf = None, None, None
+     try:
+         pipe.vae, pipe.vae_tiny = pipe.vae_tiny, pipe.vae
+
+         reader = imageio.get_reader(path_input)
+
+         meta_data = reader.get_meta_data()
+         fps = meta_data["fps"]
+         size = meta_data["size"]
+         max_orig = max(size)
+         duration_sec = meta_data["duration"]
+         total_frames = int(fps * duration_sec)
+
+         out_duration_sec = out_max_frames / fps
+         if duration_sec > out_duration_sec:
+             gr.Warning(
+                 f"Only the first ~{int(out_duration_sec)} seconds will be processed; "
+                 f"use alternative setups such as ComfyUI Marigold node for full processing"
+             )
+
+         writer = imageio.get_writer(path_out_vis, fps=fps)
+         zipf = zipfile.ZipFile(path_out_16bit, "w", zipfile.ZIP_DEFLATED)
+
+         last_frame_latent = None
+         latent_common = torch.randn(
+             (
+                 1,
+                 4,
+                 (768 * size[1] + 7 * max_orig) // (8 * max_orig),
+                 (768 * size[0] + 7 * max_orig) // (8 * max_orig),
+             ),
+             generator=generator,
+             device=pipe.device,
+             dtype=torch.float16,
+         )
+
+         out_frame_id = 0
+         pbar = tqdm(desc="Processing Video", total=min(out_max_frames, total_frames))
+
+         for frame_id, frame in enumerate(reader):
+             out_frame_id += 1
+             pbar.update(1)
+             if out_frame_id > out_max_frames:
+                 break
+
+             frame_pil = Image.fromarray(frame)
+
+             latents = latent_common
+             if last_frame_latent is not None:
+                 assert (
+                     last_frame_latent.shape == latent_common.shape
+                 ), f"{last_frame_latent.shape}, {latent_common.shape}"
+                 latents = (
+                     1 - depth_latent_init_strength
+                 ) * latents + depth_latent_init_strength * last_frame_latent
+
+             pipe_out = pipe(
+                 frame_pil,
+                 num_inference_steps=num_inference_steps,
+                 ensemble_size=ensemble_size,
+                 processing_resolution=processing_resolution,
+                 match_input_resolution=False,
+                 batch_size=1,
+                 latents=latents,
+                 output_latent=True,
+             )

+             last_frame_latent = pipe_out.latent
+
+             processed_frame = pipe.image_processor.visualize_depth(  # noqa
+                 pipe_out.prediction
+             )[0]
+             processed_frame = imageio.core.util.Array(np.array(processed_frame))
+             writer.append_data(processed_frame)
+
+             archive_path = os.path.join(
+                 f"{name_base}_depth_16bit", f"{out_frame_id:05d}.png"
+             )
+             img_byte_arr = BytesIO()
+             processed_frame = pipe.image_processor.export_depth_to_16bit_png(
+                 pipe_out.prediction
+             )[0]
+             processed_frame.save(img_byte_arr, format="png")
+             img_byte_arr.seek(0)
+             zipf.writestr(archive_path, img_byte_arr.read())
+     finally:
+         if zipf is not None:
+             zipf.close()
+
+         if writer is not None:
+             writer.close()
+
+         if reader is not None:
+             reader.close()
+
+         pipe.vae, pipe.vae_tiny = pipe.vae_tiny, pipe.vae

      return (
          path_out_vis,
@@ -232,9 +259,9 @@ def process_bas(
      plane_near=default_bas_plane_near,
      plane_far=default_bas_plane_far,
      embossing=default_bas_embossing,
-     denoise_steps=default_bas_denoise_steps,
+     num_inference_steps=default_bas_num_inference_steps,
      ensemble_size=default_bas_ensemble_size,
-     processing_res=default_bas_processing_res,
+     processing_resolution=default_bas_processing_resolution,
      size_longest_px=default_bas_size_longest_px,
      size_longest_cm=default_bas_size_longest_cm,
      filter_size=default_bas_filter_size,
@@ -257,16 +284,17 @@ def process_bas(

      input_image = Image.open(path_input)

+     generator = torch.Generator(device=pipe.device).manual_seed(default_seed)
+
      pipe_out = pipe(
          input_image,
-         denoising_steps=denoise_steps,
+         num_inference_steps=num_inference_steps,
          ensemble_size=ensemble_size,
-         processing_res=processing_res,
-         seed=default_seed,
-         show_progress_bar=False,
+         processing_resolution=processing_resolution,
+         generator=generator,
      )

-     depth_pred = pipe_out.depth_np * 65535
+     depth_pred = pipe_out.prediction[0, :, :, 0] * 65535

      def _process_3d(
          size_longest_px,
@@ -275,6 +303,7 @@ def process_bas(
          scene_lights,
          output_model_scale=None,
          prepare_for_3d_printing=False,
+         zip_outputs=False,
      ):
          image_rgb_w, image_rgb_h = input_image.width, input_image.height
          image_rgb_d = max(image_rgb_w, image_rgb_h)
@@ -294,7 +323,7 @@ def process_bas(
              (image_new_w, image_new_h), Image.BILINEAR
          ).convert("I").save(image_depth_new)

-         path_glb, path_stl = extrude_depth_3d(
+         path_glb, path_stl, path_obj = extrude_depth_3d(
              image_rgb_new,
              image_depth_new,
              output_model_scale=(
@@ -312,22 +341,24 @@ def process_bas(
              vertex_colors=vertex_colors,
              scene_lights=scene_lights,
              prepare_for_3d_printing=prepare_for_3d_printing,
+             zip_outputs=zip_outputs,
          )

-         return path_glb, path_stl
+         return path_glb, path_stl, path_obj

-     path_viewer_glb, _ = _process_3d(
+     path_viewer_glb, _, _ = _process_3d(
          256, filter_size, vertex_colors=False, scene_lights=True, output_model_scale=1
      )
-     path_files_glb, path_files_stl = _process_3d(
+     path_files_glb, path_files_stl, path_files_obj = _process_3d(
          size_longest_px,
          filter_size,
          vertex_colors=True,
          scene_lights=False,
          prepare_for_3d_printing=True,
+         zip_outputs=True,
      )

-     return path_viewer_glb, [path_files_glb, path_files_stl]
+     return path_viewer_glb, [path_files_glb, path_files_stl, path_files_obj]


  def run_demo_server(pipe, hf_writer=None):
@@ -461,12 +492,12 @@ def run_demo_server(pipe, hf_writer=None):
              )
              image_reset_btn = gr.Button(value="Reset")
              with gr.Accordion("Advanced options", open=False):
-                 image_denoise_steps = gr.Slider(
+                 image_num_inference_steps = gr.Slider(
                      label="Number of denoising steps",
                      minimum=1,
                      maximum=4,
                      step=1,
-                     value=default_image_denoise_steps,
+                     value=default_image_num_inference_steps,
                  )
                  image_ensemble_size = gr.Slider(
                      label="Ensemble size",
@@ -475,13 +506,13 @@ def run_demo_server(pipe, hf_writer=None):
                      step=1,
                      value=default_image_ensemble_size,
                  )
-                 image_processing_res = gr.Radio(
+                 image_processing_resolution = gr.Radio(
                      [
                          ("Native", 0),
                          ("Recommended", 768),
                      ],
                      label="Processing resolution",
-                     value=default_image_processing_res,
+                     value=default_image_processing_resolution,
                  )
          with gr.Column():
              image_output_slider = ImageSlider(
@@ -656,12 +687,12 @@ def run_demo_server(pipe, hf_writer=None):
              with gr.Accordion(
                  "3D printing demo: Advanced options", open=False
              ):
-                 bas_denoise_steps = gr.Slider(
+                 bas_num_inference_steps = gr.Slider(
                      label="Number of denoising steps",
                      minimum=1,
                      maximum=4,
                      step=1,
-                     value=default_bas_denoise_steps,
+                     value=default_bas_num_inference_steps,
                  )
                  bas_ensemble_size = gr.Slider(
                      label="Ensemble size",
@@ -670,13 +701,13 @@ def run_demo_server(pipe, hf_writer=None):
                      step=1,
                      value=default_bas_ensemble_size,
                  )
-                 bas_processing_res = gr.Radio(
+                 bas_processing_resolution = gr.Radio(
                      [
                          ("Native", 0),
                          ("Recommended", 768),
                      ],
                      label="Processing resolution",
-                     value=default_bas_processing_res,
+                     value=default_bas_processing_resolution,
                  )
                  bas_size_longest_px = gr.Slider(
                      label="Size (px) of the longest side",
@@ -740,9 +771,9 @@ def run_demo_server(pipe, hf_writer=None):
                  0.0, # plane_near
                  0.66, # plane_far
                  15, # embossing
-                 4, # denoise_steps
+                 4, # num_inference_steps
                  4, # ensemble_size
-                 768, # processing_res
+                 768, # processing_resolution
                  512, # size_longest_px
                  10, # size_longest_cm
                  3, # filter_size
@@ -755,14 +786,14 @@ def run_demo_server(pipe, hf_writer=None):
                  0.0, # plane_near
                  0.5, # plane_far
                  50, # embossing
-                 2, # denoise_steps
+                 2, # num_inference_steps
                  1, # ensemble_size
-                 768, # processing_res
+                 768, # processing_resolution
                  512, # size_longest_px
                  10, # size_longest_cm
                  3, # filter_size
                  5, # frame_thickness
-                 -15, # frame_near
+                 -25, # frame_near
                  1, # frame_far
              ],
              [
@@ -770,9 +801,9 @@ def run_demo_server(pipe, hf_writer=None):
                  0.0, # plane_near
                  1.0, # plane_far
                  20, # embossing
-                 2, # denoise_steps
+                 2, # num_inference_steps
                  4, # ensemble_size
-                 768, # processing_res
+                 768, # processing_resolution
                  512, # size_longest_px
                  10, # size_longest_cm
                  3, # filter_size
@@ -786,9 +817,9 @@ def run_demo_server(pipe, hf_writer=None):
              bas_plane_near,
              bas_plane_far,
              bas_embossing,
-             bas_denoise_steps,
+             bas_num_inference_steps,
              bas_ensemble_size,
-             bas_processing_res,
+             bas_processing_resolution,
              bas_size_longest_px,
              bas_size_longest_cm,
              bas_filter_size,
@@ -836,9 +867,9 @@ def run_demo_server(pipe, hf_writer=None):
          fn=process_pipe_image,
          inputs=[
              image_input,
-             image_denoise_steps,
+             image_num_inference_steps,
              image_ensemble_size,
-             image_processing_res,
+             image_processing_resolution,
          ],
          outputs=[image_output_slider, image_output_files],
          concurrency_limit=1,
@@ -854,9 +885,9 @@ def run_demo_server(pipe, hf_writer=None):
          fn=process_pipe_image,
          inputs=[
              image_input,
-             image_denoise_steps,
+             image_num_inference_steps,
              image_ensemble_size,
-             image_processing_res,
+             image_processing_resolution,
          ],
          outputs=[image_output_slider, image_output_files],
          concurrency_limit=1,
@@ -868,8 +899,8 @@ def run_demo_server(pipe, hf_writer=None):
              None,
              None,
              default_image_ensemble_size,
-             default_image_denoise_steps,
-             default_image_processing_res,
+             default_image_num_inference_steps,
+             default_image_processing_resolution,
          ),
          inputs=[],
          outputs=[
@@ -877,8 +908,8 @@ def run_demo_server(pipe, hf_writer=None):
              image_output_slider,
              image_output_files,
              image_ensemble_size,
-             image_denoise_steps,
-             image_processing_res,
+             image_num_inference_steps,
+             image_processing_resolution,
          ],
          queue=False,
      )
@@ -902,9 +933,9 @@ def run_demo_server(pipe, hf_writer=None):
      if hf_writer is not None:
          share_components = [
              image_input,
-             image_denoise_steps,
+             image_num_inference_steps,
              image_ensemble_size,
-             image_processing_res,
+             image_processing_resolution,
              image_output_slider,
              share_content_is_legal,
              share_transfer_of_rights,
@@ -970,9 +1001,9 @@ def run_demo_server(pipe, hf_writer=None):
              bas_plane_near,
              bas_plane_far,
              bas_embossing,
-             bas_denoise_steps,
+             bas_num_inference_steps,
              bas_ensemble_size,
-             bas_processing_res,
+             bas_processing_resolution,
              bas_size_longest_px,
              bas_size_longest_cm,
              bas_filter_size,
@@ -993,9 +1024,9 @@ def run_demo_server(pipe, hf_writer=None):
              default_bas_plane_near,
              default_bas_plane_far,
              default_bas_embossing,
-             default_bas_denoise_steps,
+             default_bas_num_inference_steps,
              default_bas_ensemble_size,
-             default_bas_processing_res,
+             default_bas_processing_resolution,
              default_bas_size_longest_px,
              default_bas_size_longest_cm,
              default_bas_filter_size,
@@ -1012,9 +1043,9 @@ def run_demo_server(pipe, hf_writer=None):
              bas_plane_near,
              bas_plane_far,
              bas_embossing,
-             bas_denoise_steps,
+             bas_num_inference_steps,
              bas_ensemble_size,
-             bas_processing_res,
+             bas_processing_resolution,
              bas_size_longest_px,
              bas_size_longest_cm,
              bas_filter_size,
@@ -1036,8 +1067,8 @@ def run_demo_server(pipe, hf_writer=None):


  def main():
-     CHECKPOINT = "prs-eth/marigold-lcm-v1-0"
-     CROWD_DATA = "crowddata-marigold-lcm-v1-0-space-v1-0"
+     CHECKPOINT = "prs-eth/marigold-depth-lcm-v1-0"
+     CROWD_DATA = "crowddata-marigold-depth-lcm-v1-0-space-v1-0"

      os.system("pip freeze")
@@ -1046,7 +1077,14 @@ def main():

      device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

-     pipe = MarigoldDepthConsistencyPipeline.from_pretrained(CHECKPOINT)
+     pipe = MarigoldDepthPipeline.from_pretrained(
+         CHECKPOINT, variant="fp16", torch_dtype=torch.float16
+     ).to(device)
+     pipe.vae_tiny = diffusers.AutoencoderTiny.from_pretrained(
+         "madebyollin/taesd", torch_dtype=torch.float16
+     ).to(device)
+     pipe.set_progress_bar_config(disable=True)
+
      try:
          import xformers
@@ -1054,12 +1092,10 @@ def main():
      except:
          pass # run without xformers

-     pipe = pipe.to(device)
-
      hf_writer = None
-     if "HF_TOKEN_LOGIN" in os.environ:
+     if "HF_TOKEN_LOGIN_WRITE_CROWD" in os.environ:
          hf_writer = HuggingFaceDatasetSaver(
-             os.getenv("HF_TOKEN_LOGIN"),
+             os.getenv("HF_TOKEN_LOGIN_WRITE_CROWD"),
              CROWD_DATA,
              private=True,
              info_filename="dataset_info.json",
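
Review note: the video tab's temporal stabilization now lives in app.py itself rather than in the removed custom pipeline. Each frame denoises from one fixed noise tensor, blended toward the previous frame's output latent. A condensed sketch of just that loop is below; the `stabilized_depth_frames` helper and `frames` iterable are illustrative only, while the pipeline arguments mirror the diff above:

```python
# Sketch of the latent-blending loop from the new process_video above.
import torch

def stabilized_depth_frames(pipe, frames, latent_common, strength=0.1):
    """Yield depth predictions for PIL `frames`, reusing latents for stability."""
    last_latent = None
    for frame in frames:
        latents = latent_common
        if last_latent is not None:
            # Nudge the fixed noise toward the previous frame's latent;
            # strength 0.1 matches default_video_depth_latent_init_strength.
            latents = (1 - strength) * latents + strength * last_latent
        out = pipe(
            frame,
            num_inference_steps=1,
            ensemble_size=1,
            processing_resolution=768,
            match_input_resolution=False,
            batch_size=1,
            latents=latents,
            output_latent=True,
        )
        last_latent = out.latent
        yield out.prediction
```

Note also the `pipe.vae`/`pipe.vae_tiny` swap wrapping the loop: the tiny TAESD autoencoder decodes latents much faster, which is what lets the tab keep the native fps and raise the frame cap from 100 to 450.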
extrude.py CHANGED
@@ -20,11 +20,13 @@
  import math
  import os
+ import zipfile

  import numpy as np
  import pygltflib
  import trimesh
- from PIL import Image, ImageFilter
+ from PIL import Image
+ from scipy.ndimage import median_filter


  def quaternion_multiply(q1, q2):
@@ -103,6 +105,7 @@ def glb_add_lights(path_input, path_output):
  def extrude_depth_3d(
      path_rgb,
      path_depth,
+     path_out_base=None,
      output_model_scale=100,
      filter_size=3,
      coef_near=0.0,
@@ -114,6 +117,7 @@ def extrude_depth_3d(
      vertex_colors=True,
      scene_lights=True,
      prepare_for_3d_printing=False,
+     zip_outputs=False,
  ):
      f_far_inner = -emboss
      f_far_outer = f_far_inner - f_back
@@ -121,12 +125,11 @@ def extrude_depth_3d(
      f_near = max(f_near, f_far_inner)

      depth_image = Image.open(path_depth)
-     assert depth_image.mode == "I", depth_image.mode
-     depth_image = depth_image.filter(ImageFilter.MedianFilter(size=filter_size))

      w, h = depth_image.size
      d_max = max(w, h)
      depth_image = np.array(depth_image).astype(np.double)
+     depth_image = median_filter(depth_image, size=filter_size)
      z_min, z_max = np.min(depth_image), np.max(depth_image)
      depth_image = (depth_image.astype(np.double) - z_min) / (z_max - z_min)
      depth_image[depth_image < coef_near] = coef_near
@@ -341,14 +344,57 @@ def extrude_depth_3d(
      )
      mesh.apply_transform(rotation_mat)

-     path_out_base = os.path.splitext(path_depth)[0].replace("_16bit", "")
+     if path_out_base is None:
+         path_out_base = os.path.splitext(path_depth)[0].replace("_16bit", "")
      path_out_glb = path_out_base + ".glb"
      path_out_stl = path_out_base + ".stl"
+     path_out_obj = path_out_base + ".obj"

      mesh.export(path_out_glb, file_type="glb")
      if scene_lights:
          glb_add_lights(path_out_glb, path_out_glb)
-
      mesh.export(path_out_stl, file_type="stl")
-
-     return path_out_glb, path_out_stl
+     mesh.export(path_out_obj, file_type="obj")
+
+     if zip_outputs:
+         with zipfile.ZipFile(path_out_glb + ".zip", "w", zipfile.ZIP_DEFLATED) as zipf:
+             arcname = os.path.basename(os.path.splitext(path_out_glb)[0]) + ".glb"
+             zipf.write(path_out_glb, arcname=arcname)
+         path_out_glb = path_out_glb + ".zip"
+         with zipfile.ZipFile(path_out_stl + ".zip", "w", zipfile.ZIP_DEFLATED) as zipf:
+             arcname = os.path.basename(os.path.splitext(path_out_stl)[0]) + ".stl"
+             zipf.write(path_out_stl, arcname=arcname)
+         path_out_stl = path_out_stl + ".zip"
+         with zipfile.ZipFile(path_out_obj + ".zip", "w", zipfile.ZIP_DEFLATED) as zipf:
+             arcname = os.path.basename(os.path.splitext(path_out_obj)[0]) + ".obj"
+             zipf.write(path_out_obj, arcname=arcname)
+         path_out_obj = path_out_obj + ".zip"
+
+     return path_out_glb, path_out_stl, path_out_obj
+
+
+ if __name__ == "__main__":
+     img_rgb = "files/basrelief/einstein.jpg"
+     img_depth = "gradio_cached_examples/examples_image/Depth outputs/54d74157894322bdc77c/einstein_depth_16bit.png"
+     Image.open(img_rgb).resize((512, 512), Image.LANCZOS).save(
+         "einstein_3d_tex_512.jpg"
+     )
+     Image.open(img_depth).convert(mode="F").resize((512, 512), Image.BILINEAR).convert(
+         "I"
+     ).save("einstein_3d_depth_512.png")
+     extrude_depth_3d(
+         "einstein_3d_tex_512.jpg",
+         "einstein_3d_depth_512.png",
+         path_out_base="einstein_3d_out",
+         output_model_scale=100,
+         filter_size=3,
+         coef_near=0.0,
+         coef_far=0.5,
+         emboss=0.5,
+         f_thic=0.05,
+         f_near=-0.25,
+         f_back=0.01,
+         vertex_colors=True,
+         scene_lights=True,
+         prepare_for_3d_printing=True,
+     )
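
Side note on the filtering change: the median filter moved from PIL image space (ImageFilter.MedianFilter on the mode-"I" image) to scipy array space, applied after the depth PNG is loaded as a float array. A quick standalone check of the new call on synthetic data (not from the repo):

```python
# Standalone check of scipy's median_filter as used in extrude_depth_3d.
import numpy as np
from scipy.ndimage import median_filter

depth = np.zeros((64, 64), dtype=np.double)
depth[10, 10] = 65535.0  # a salt-noise spike the 3x3 median should remove

smoothed = median_filter(depth, size=3)
assert smoothed.shape == depth.shape
assert smoothed[10, 10] == 0.0  # spike suppressed, everything else unchanged
```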
gradio_cached_examples/examples_bas/3D model outputs high-res/{f4a69712004b188f4bb0/food_depth_512.glb → 0f57994f5d6ac12c1020/food_depth_512.glb.zip} RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:f0a1784597fc64d8e06cf549fef351b23de5f075e04758d8ed26b1e97aefdeb1
- size 7053032
+ oid sha256:2343e8ab8c40539f3d4cdb6bde4af13964b989af33d1993eb2c3edd9e3822950
+ size 2089619
gradio_cached_examples/examples_bas/3D model outputs high-res/0fbd5fe3c8b8dd4f05d1/einstein_depth_512.stl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:2da8767fd0fafe985b0deeb832254b26e93cfdbdb0e7040aaa3233048c566227
- size 26419484
gradio_cached_examples/examples_bas/3D model outputs high-res/127d9bcaf03fa5f41dd3/food_depth_512.stl.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:95a5d9c1fd336e1e73ed3bcd388ec980da90b4ba601ce999839ab44baca5abb3
+ size 6527220
gradio_cached_examples/examples_bas/3D model outputs high-res/49a7818bed0f17ce01cf/food_depth_512.stl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:9accc5a6bada4c60a89263e885b2d11a03a9482447ede5ab5fa72458e2522137
- size 17630084
gradio_cached_examples/examples_bas/3D model outputs high-res/96a98e08d96fd47e5cc6/einstein_depth_512.obj.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a5b62efa853cbba11eba28f99307b40dfca300ec8047f2d778db92b3d3417d81
+ size 5977474
gradio_cached_examples/examples_bas/3D model outputs high-res/a17995f3d4750a0e0bbc/food_depth_512.obj.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fe92c1762da36b2d9fbe1419d5f91fc9cc64f739de6dc2b122fb1f1c6ca3e7e
+ size 4044632
gradio_cached_examples/examples_bas/3D model outputs high-res/b0b93bdcbedf077307ba/coin_depth_512.stl.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ac5e87a94275d6df74d5a7a42672cb97fbf29a3c2a6caf8dbc3b979641fea560
+ size 7434540
gradio_cached_examples/examples_bas/3D model outputs high-res/b69b15d737b2de6938a5/einstein_depth_512.glb DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:aaf9049d52123504ee44e1af8426f9e081fc67450a3e7aefd7c8556e14ee7fee
- size 10568768
gradio_cached_examples/examples_bas/3D model outputs high-res/c0b00e36b9db31f4e9fd/coin_depth_512.stl DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:398abd4c61d86dc8bc5ab4ac5d6ed0aad50658512048176b647d8df72ae4ddf9
- size 26162484
gradio_cached_examples/examples_bas/3D model outputs high-res/c7499e9097e58b706e51/einstein_depth_512.glb.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e2b7641a5d61736e66868e895d1c97b0f15458d694e1164cc484bdea961a309f
+ size 2977145
gradio_cached_examples/examples_bas/3D model outputs high-res/ebe8a8d03fbc1a1fc2bd/coin_depth_512.glb.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abb6853f1086ad891ffd7407f3b913f2019f6c28256065f1ae94145c374ec220
+ size 3521996
gradio_cached_examples/examples_bas/3D model outputs high-res/ee9ee048f590c0c9a2c8/einstein_depth_512.stl.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5bf0df4560b958ab8f45cda1cd86a989d0506fbc1ab4c7ff37037a200ac4fd4
+ size 8353042
gradio_cached_examples/examples_bas/3D model outputs high-res/ef353a417723b89274d9/coin_depth_512.glb DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:e2b98bc390dde108fc557d0a855c1ad9605e79ec6fde24122b74b63685ea5c45
- size 10465992
gradio_cached_examples/examples_bas/3D model outputs high-res/fbaa26ffc2eb3654c177/coin_depth_512.obj.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6bce06188d1a22e3a6b6b339cca62a2ea59da00925ea6c19bc580bf56250323a
+ size 5631807
gradio_cached_examples/examples_bas/3D preview low-res relief highlight/{adb58f0f05db6046581f → 78ff2a583036eab8fe9b}/coin_depth_256.glb RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b589d787251bd3117f5eeecead011950015ba0f4b42dc3b26f8af8f52177b91a
- size 2369860
+ oid sha256:1ae49b26ba4187dc3704cc00040c5671f1088a131f4796768fa86ec3eb67c1a1
+ size 2369864
gradio_cached_examples/examples_bas/3D preview low-res relief highlight/{393dc8309ae2f31f47e9 → 8feb5fe1e8941c880c40}/food_depth_256.glb RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:4956ff55699585fd7bd703785cc894f35ec6a6c19595b7e483ccecf1377203fd
- size 1598964
+ oid sha256:fb7df74b49b34e916945c7bd31b45d6359db2ebb3d0bbb0e2fdd9d9222fb0816
+ size 1598968
gradio_cached_examples/examples_bas/3D preview low-res relief highlight/{d2ca1f18e3da0b06142e → bb26fd8a9d7890806329}/einstein_depth_256.glb RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:18b05dfa7db4ce4fef14121a95abd57adcef5df5eeca23a44bc5f9e42bc3901f
- size 2397704
+ oid sha256:88372996a41fccdc6e03b77edb2542fb75aaa1623b25c3cbf4a65de61131d0d3
+ size 2397708
gradio_cached_examples/examples_bas/log.csv CHANGED
@@ -1,4 +1,4 @@
  "3D preview (low-res, relief highlight)",3D model outputs (high-res),flag,username,timestamp
- "{""path"":""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/adb58f0f05db6046581f/coin_depth_256.glb"",""url"":""/file=/tmp/gradio/784992f04cfa1fd8ac2d92cd4d58b141f3433c58/coin_depth_256.glb"",""size"":null,""orig_name"":""coin_depth_256.glb"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}","[{""path"":""gradio_cached_examples/examples_bas/3D model outputs high-res/ef353a417723b89274d9/coin_depth_512.glb"",""url"":""/file=/tmp/gradio/b64f0ea070ce8490cd696bc76acb7bbf918d3183/coin_depth_512.glb"",""size"":10465992,""orig_name"":""coin_depth_512.glb"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_bas/3D model outputs high-res/c0b00e36b9db31f4e9fd/coin_depth_512.stl"",""url"":""/file=/tmp/gradio/15659bed2799af75f4eb1c16b4f9b0d2af113235/coin_depth_512.stl"",""size"":26162484,""orig_name"":""coin_depth_512.stl"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-04-03 18:29:48.978017
- "{""path"":""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/d2ca1f18e3da0b06142e/einstein_depth_256.glb"",""url"":""/file=/tmp/gradio/59b193d76e6d22d077eceffbc7653b132c85b8b3/einstein_depth_256.glb"",""size"":null,""orig_name"":""einstein_depth_256.glb"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}","[{""path"":""gradio_cached_examples/examples_bas/3D model outputs high-res/b69b15d737b2de6938a5/einstein_depth_512.glb"",""url"":""/file=/tmp/gradio/2495ecf670703eea800c67256f1c14df947c133c/einstein_depth_512.glb"",""size"":10568768,""orig_name"":""einstein_depth_512.glb"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_bas/3D model outputs high-res/0fbd5fe3c8b8dd4f05d1/einstein_depth_512.stl"",""url"":""/file=/tmp/gradio/324a085b1d9c80ff7084169f2fa9a88caf67084c/einstein_depth_512.stl"",""size"":26419484,""orig_name"":""einstein_depth_512.stl"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-04-03 18:29:51.356218
- "{""path"":""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/393dc8309ae2f31f47e9/food_depth_256.glb"",""url"":""/file=/tmp/gradio/067615320ce5aadeb06abed3ca571ed420986df5/food_depth_256.glb"",""size"":null,""orig_name"":""food_depth_256.glb"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}","[{""path"":""gradio_cached_examples/examples_bas/3D model outputs high-res/f4a69712004b188f4bb0/food_depth_512.glb"",""url"":""/file=/tmp/gradio/ed41062301027eb8fffedbecde30e5ce2e0123bc/food_depth_512.glb"",""size"":7053032,""orig_name"":""food_depth_512.glb"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_bas/3D model outputs high-res/49a7818bed0f17ce01cf/food_depth_512.stl"",""url"":""/file=/tmp/gradio/9eb6407ecff9a420865feebadad805e4c1bc23d8/food_depth_512.stl"",""size"":17630084,""orig_name"":""food_depth_512.stl"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-04-03 18:29:54.313883
+ "{""path"": ""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/78ff2a583036eab8fe9b/coin_depth_256.glb"", ""url"": ""/file=/tmp/gradio/75f3f8661319bbfa07a73e3c6aad5381af2bb662/coin_depth_256.glb"", ""size"": null, ""orig_name"": ""coin_depth_256.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","[{""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/ebe8a8d03fbc1a1fc2bd/coin_depth_512.glb.zip"", ""url"": ""/file=/tmp/gradio/8d501a8d9351d4ac105f330018213f88c8ab46e2/coin_depth_512.glb.zip"", ""size"": 3521996, ""orig_name"": ""coin_depth_512.glb.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/b0b93bdcbedf077307ba/coin_depth_512.stl.zip"", ""url"": ""/file=/tmp/gradio/67269f9efe556613199b0e4e587db71ecaf273e7/coin_depth_512.stl.zip"", ""size"": 7434540, ""orig_name"": ""coin_depth_512.stl.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/fbaa26ffc2eb3654c177/coin_depth_512.obj.zip"", ""url"": ""/file=/tmp/gradio/3e0dc5d754e113c9553b04644fe50279a65ad85d/coin_depth_512.obj.zip"", ""size"": 5631807, ""orig_name"": ""coin_depth_512.obj.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}]",,,2024-06-02 01:52:32.122923
+ "{""path"": ""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/bb26fd8a9d7890806329/einstein_depth_256.glb"", ""url"": ""/file=/tmp/gradio/d72948c76f50b644cfd2bc4f2eb68ec4d1dfd6b3/einstein_depth_256.glb"", ""size"": null, ""orig_name"": ""einstein_depth_256.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","[{""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/c7499e9097e58b706e51/einstein_depth_512.glb.zip"", ""url"": ""/file=/tmp/gradio/1d0bac2ed809c2c3d7ffecf0a40ca8f8398521da/einstein_depth_512.glb.zip"", ""size"": 2977145, ""orig_name"": ""einstein_depth_512.glb.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/ee9ee048f590c0c9a2c8/einstein_depth_512.stl.zip"", ""url"": ""/file=/tmp/gradio/3eb2701225d3b2362b3fc702c7b02bf2d9072308/einstein_depth_512.stl.zip"", ""size"": 8353042, ""orig_name"": ""einstein_depth_512.stl.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/96a98e08d96fd47e5cc6/einstein_depth_512.obj.zip"", ""url"": ""/file=/tmp/gradio/063a0b23c678e16f689b0bd8e3b6784e084a283f/einstein_depth_512.obj.zip"", ""size"": 5977474, ""orig_name"": ""einstein_depth_512.obj.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}]",,,2024-06-02 01:52:37.086710
+ "{""path"": ""gradio_cached_examples/examples_bas/3D preview low-res relief highlight/8feb5fe1e8941c880c40/food_depth_256.glb"", ""url"": ""/file=/tmp/gradio/32d5f81d5bff33dcbf73bf5b7f46e23f9f73e0e3/food_depth_256.glb"", ""size"": null, ""orig_name"": ""food_depth_256.glb"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}","[{""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/0f57994f5d6ac12c1020/food_depth_512.glb.zip"", ""url"": ""/file=/tmp/gradio/c7790f4982c192c3fe02d99143d293d0c6c28bc3/food_depth_512.glb.zip"", ""size"": 2089619, ""orig_name"": ""food_depth_512.glb.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/127d9bcaf03fa5f41dd3/food_depth_512.stl.zip"", ""url"": ""/file=/tmp/gradio/cdfe46cad85481cdf6472efdddfdc6cca3fad3f4/food_depth_512.stl.zip"", ""size"": 6527220, ""orig_name"": ""food_depth_512.stl.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}, {""path"": ""gradio_cached_examples/examples_bas/3D model outputs high-res/a17995f3d4750a0e0bbc/food_depth_512.obj.zip"", ""url"": ""/file=/tmp/gradio/9f3cac3ebaef08375d5558783cb66009842b5a38/food_depth_512.obj.zip"", ""size"": 4044632, ""orig_name"": ""food_depth_512.obj.zip"", ""mime_type"": null, ""is_stream"": false, ""meta"": {""_type"": ""gradio.FileData""}}]",,,2024-06-02 01:52:42.365773
gradio_cached_examples/examples_video/Depth outputs/{207a3fa61126cf3d1981 → 41ffc6734af27742f841}/obama_depth_16bit.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c42c9011121d927ca26e0abfc62f3724fbfc8a6848b522010f2e6ed98210a7ce
- size 38156752
+ oid sha256:0e475e04f4b079263eefb9e90b01c303b02a361a927d9d8617b8c9875a43166a
+ size 32516136
gradio_cached_examples/examples_video/Depth outputs/{93ea09fe13fa3ed86f2d → 4d0f30c84219f7e28c5b}/cab_depth_16bit.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:d5890dc53ca881d7327a9e70c8ac895ce2b21389af7da22d91060dd059663ab3
- size 39185578
+ oid sha256:98662bffb5c1e4c7600e6b6bf05a66fc1e0fa393a95d6bee349076e4364c97cc
+ size 41584240
gradio_cached_examples/examples_video/{Output video depth red-near blue-far/a9ebfdb1e7b3856d929a → Depth outputs/7e50f61bc97230e10bc9}/cab_depth_colored.mp4 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:81b3d4916e5b594174a49a7b064e4490b39dbed443f9d8ecea347c433284ff54
- size 473925
+ oid sha256:a26735b01e362daef276832e1d6604a6dfbf7239ead96b98de490e39fca4dbc5
+ size 672175
gradio_cached_examples/examples_video/Depth outputs/{0467a73b0d8d3ade99a7 → abdbb71b14bbbd9e179e}/elephant_depth_colored.mp4 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9dfb3689c97f04cabf39b4aa8a06ad4ce8fb424a353bf30d1641fb28f79c4d48
- size 755285
+ oid sha256:81ffe1eaf172ce2767fae276c9ffb272ba858b540e50c326556bc80cc6b0e6bf
+ size 950445
gradio_cached_examples/examples_video/Depth outputs/{93d8ed44918458cb56b7 → c3260a8a07bb43e647b8}/elephant_depth_16bit.zip RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:46f56972e6aced965f89c3c2057534fe88fb3898ae4f7fcd3af4e5cd9a8527d6
- size 41877515
+ oid sha256:a4feebb5dc02d71b499a33bf18040c60dedf8563413087ee17d71f7edd394e0c
+ size 50743181
gradio_cached_examples/examples_video/Depth outputs/{df8f2c089d19ebdb0eef → ffbf959599ab71f64cb8}/obama_depth_colored.mp4 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:86a007b675d5e294a7e8b036dea45a1034a0580f387353feec599920eb560ba7
- size 430863
+ oid sha256:61a04be91e28ad207d3813b4cae689d9d2510adf88a9826bd6b55a7b93934422
+ size 505939
gradio_cached_examples/examples_video/Output video depth red-near blue-far/{2301d8bcf6db4f0588c7 → 575c12e3b6ef94b4620e}/obama_depth_colored.mp4 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:86a007b675d5e294a7e8b036dea45a1034a0580f387353feec599920eb560ba7
- size 430863
+ oid sha256:61a04be91e28ad207d3813b4cae689d9d2510adf88a9826bd6b55a7b93934422
+ size 505939
gradio_cached_examples/examples_video/{Depth outputs/c12711ccd5a2c49cf18c → Output video depth red-near blue-far/6002bb1e4b17b4f06de1}/cab_depth_colored.mp4 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:81b3d4916e5b594174a49a7b064e4490b39dbed443f9d8ecea347c433284ff54
- size 473925
+ oid sha256:a26735b01e362daef276832e1d6604a6dfbf7239ead96b98de490e39fca4dbc5
+ size 672175
gradio_cached_examples/examples_video/Output video depth red-near blue-far/{89a2cce251acce733da7 → d0eb4f4347977106f1f2}/elephant_depth_colored.mp4 RENAMED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:9dfb3689c97f04cabf39b4aa8a06ad4ce8fb424a353bf30d1641fb28f79c4d48
- size 755285
+ oid sha256:81ffe1eaf172ce2767fae276c9ffb272ba858b540e50c326556bc80cc6b0e6bf
+ size 950445
gradio_cached_examples/examples_video/log.csv CHANGED
@@ -1,4 +1,4 @@
 "Output video depth (red-near, blue-far)",Depth outputs,flag,username,timestamp
-"{""video"":{""path"":""gradio_cached_examples/examples_video/Output video depth red-near blue-far/a9ebfdb1e7b3856d929a/cab_depth_colored.mp4"",""url"":""/file=/tmp/gradio/baf61a4ecb34b14c80bd9dcc84bd792760ea4970/cab_depth_colored.mp4"",""size"":null,""orig_name"":""cab_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},""subtitles"":null}","[{""path"":""gradio_cached_examples/examples_video/Depth outputs/c12711ccd5a2c49cf18c/cab_depth_colored.mp4"",""url"":""/file=/tmp/gradio/baf61a4ecb34b14c80bd9dcc84bd792760ea4970/cab_depth_colored.mp4"",""size"":473925,""orig_name"":""cab_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_video/Depth outputs/93ea09fe13fa3ed86f2d/cab_depth_16bit.zip"",""url"":""/file=/tmp/gradio/d12046d2cec66d2c8f5b425605842521d6dfcf16/cab_depth_16bit.zip"",""size"":39185578,""orig_name"":""cab_depth_16bit.zip"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-04-03 18:28:18.946260
-"{""video"":{""path"":""gradio_cached_examples/examples_video/Output video depth red-near blue-far/89a2cce251acce733da7/elephant_depth_colored.mp4"",""url"":""/file=/tmp/gradio/33743a70fe7f2a295fbfcea57161cfd65b59f525/elephant_depth_colored.mp4"",""size"":null,""orig_name"":""elephant_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},""subtitles"":null}","[{""path"":""gradio_cached_examples/examples_video/Depth outputs/0467a73b0d8d3ade99a7/elephant_depth_colored.mp4"",""url"":""/file=/tmp/gradio/33743a70fe7f2a295fbfcea57161cfd65b59f525/elephant_depth_colored.mp4"",""size"":755285,""orig_name"":""elephant_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_video/Depth outputs/93d8ed44918458cb56b7/elephant_depth_16bit.zip"",""url"":""/file=/tmp/gradio/b6bee3a1450eb3169246820b83b4697fd13bdc5c/elephant_depth_16bit.zip"",""size"":41877515,""orig_name"":""elephant_depth_16bit.zip"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-04-03 18:28:58.771952
-"{""video"":{""path"":""gradio_cached_examples/examples_video/Output video depth red-near blue-far/2301d8bcf6db4f0588c7/obama_depth_colored.mp4"",""url"":""/file=/tmp/gradio/4fe8eb9aba002687469b3311ff39a0018692b10c/obama_depth_colored.mp4"",""size"":null,""orig_name"":""obama_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},""subtitles"":null}","[{""path"":""gradio_cached_examples/examples_video/Depth outputs/df8f2c089d19ebdb0eef/obama_depth_colored.mp4"",""url"":""/file=/tmp/gradio/4fe8eb9aba002687469b3311ff39a0018692b10c/obama_depth_colored.mp4"",""size"":430863,""orig_name"":""obama_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_video/Depth outputs/207a3fa61126cf3d1981/obama_depth_16bit.zip"",""url"":""/file=/tmp/gradio/476f799e9f8c4000140a77684a2f34cbe86586f2/obama_depth_16bit.zip"",""size"":38156752,""orig_name"":""obama_depth_16bit.zip"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-04-03 18:29:43.295723
+"{""video"":{""path"":""gradio_cached_examples/examples_video/Output video depth red-near blue-far/6002bb1e4b17b4f06de1/cab_depth_colored.mp4"",""url"":""/file=/tmp/gradio/78d7a698603ea6a8b5e1a07b5613c044ee89b3cf/cab_depth_colored.mp4"",""size"":null,""orig_name"":""cab_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},""subtitles"":null}","[{""path"":""gradio_cached_examples/examples_video/Depth outputs/7e50f61bc97230e10bc9/cab_depth_colored.mp4"",""url"":""/file=/tmp/gradio/78d7a698603ea6a8b5e1a07b5613c044ee89b3cf/cab_depth_colored.mp4"",""size"":551342,""orig_name"":""cab_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_video/Depth outputs/4d0f30c84219f7e28c5b/cab_depth_16bit.zip"",""url"":""/file=/tmp/gradio/4f2596bb97d63c19425cd4f24502bf4549549c68/cab_depth_16bit.zip"",""size"":41330021,""orig_name"":""cab_depth_16bit.zip"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-03-28 17:10:29.241744
+"{""video"":{""path"":""gradio_cached_examples/examples_video/Output video depth red-near blue-far/d0eb4f4347977106f1f2/elephant_depth_colored.mp4"",""url"":""/file=/tmp/gradio/e8f142b72d6cbe92e6f5a28df8e89b604dfa7138/elephant_depth_colored.mp4"",""size"":null,""orig_name"":""elephant_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},""subtitles"":null}","[{""path"":""gradio_cached_examples/examples_video/Depth outputs/abdbb71b14bbbd9e179e/elephant_depth_colored.mp4"",""url"":""/file=/tmp/gradio/e8f142b72d6cbe92e6f5a28df8e89b604dfa7138/elephant_depth_colored.mp4"",""size"":846854,""orig_name"":""elephant_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_video/Depth outputs/c3260a8a07bb43e647b8/elephant_depth_16bit.zip"",""url"":""/file=/tmp/gradio/92a046ee5518a817550aab4a4cf07df5a1a71ad5/elephant_depth_16bit.zip"",""size"":42873909,""orig_name"":""elephant_depth_16bit.zip"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-03-28 17:11:09.183092
+"{""video"":{""path"":""gradio_cached_examples/examples_video/Output video depth red-near blue-far/575c12e3b6ef94b4620e/obama_depth_colored.mp4"",""url"":""/file=/tmp/gradio/429cce73a4a67c3c47ae93357bc8d0bea33789cb/obama_depth_colored.mp4"",""size"":null,""orig_name"":""obama_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},""subtitles"":null}","[{""path"":""gradio_cached_examples/examples_video/Depth outputs/ffbf959599ab71f64cb8/obama_depth_colored.mp4"",""url"":""/file=/tmp/gradio/429cce73a4a67c3c47ae93357bc8d0bea33789cb/obama_depth_colored.mp4"",""size"":509224,""orig_name"":""obama_depth_colored.mp4"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}},{""path"":""gradio_cached_examples/examples_video/Depth outputs/41ffc6734af27742f841/obama_depth_16bit.zip"",""url"":""/file=/tmp/gradio/368fa8ae89ce36b3b6fb0d6044510e5cacef38fd/obama_depth_16bit.zip"",""size"":42499726,""orig_name"":""obama_depth_16bit.zip"",""mime_type"":null,""is_stream"":false,""meta"":{""_type"":""gradio.FileData""}}]",,,2024-03-28 17:11:54.147899
requirements.txt CHANGED
@@ -1,10 +1,10 @@
-accelerate==0.25.0
+accelerate==0.30.1
 aiofiles==23.2.1
-aiohttp==3.9.3
+aiohttp==3.9.5
 aiosignal==1.3.1
 altair==5.3.0
-annotated-types==0.6.0
-anyio==4.3.0
+annotated-types==0.7.0
+anyio==4.4.0
 async-timeout==4.0.3
 attrs==23.2.0
 Authlib==1.3.0
@@ -12,115 +12,120 @@ certifi==2024.2.2
 cffi==1.16.0
 charset-normalizer==3.3.2
 click==8.0.4
-cmake==3.29.0.1
-contourpy==1.2.0
-cryptography==42.0.5
+contourpy==1.2.1
+cryptography==42.0.7
 cycler==0.12.1
-dataclasses-json==0.6.4
-datasets==2.18.0
+dataclasses-json==0.6.6
+datasets==2.19.1
 Deprecated==1.2.14
-diffusers==0.27.2
+diffusers==0.28.0
 dill==0.3.8
-exceptiongroup==1.2.0
-fastapi==0.110.0
+dnspython==2.6.1
+email_validator==2.1.1
+exceptiongroup==1.2.1
+fastapi==0.111.0
+fastapi-cli==0.0.4
 ffmpy==0.3.2
-filelock==3.13.3
-fonttools==4.50.0
+filelock==3.14.0
+fonttools==4.53.0
 frozenlist==1.4.1
-fsspec==2024.2.0
-gradio==4.21.0
-gradio_client==0.12.0
-gradio_imageslider==0.0.18
+fsspec==2024.3.1
+gradio==4.32.2
+gradio_client==0.17.0
+gradio_imageslider==0.0.20
 h11==0.14.0
 httpcore==1.0.5
+httptools==0.6.1
 httpx==0.27.0
-huggingface-hub==0.22.1
-idna==3.6
-imageio==2.34.0
-imageio-ffmpeg==0.4.9
+huggingface-hub==0.23.0
+idna==3.7
+imageio==2.34.1
+imageio-ffmpeg==0.5.0
 importlib_metadata==7.1.0
 importlib_resources==6.4.0
-itsdangerous==2.1.2
-Jinja2==3.1.3
-jsonschema==4.21.1
+itsdangerous==2.2.0
+Jinja2==3.1.4
+jsonschema==4.22.0
 jsonschema-specifications==2023.12.1
 kiwisolver==1.4.5
-lit==18.1.2
 markdown-it-py==3.0.0
 MarkupSafe==2.1.5
-marshmallow==3.21.1
+marshmallow==3.21.2
 matplotlib==3.8.2
 mdurl==0.1.2
 mpmath==1.3.0
 multidict==6.0.5
 multiprocess==0.70.16
 mypy-extensions==1.0.0
-networkx==3.2.1
+networkx==3.3
 numpy==1.26.4
-nvidia-cublas-cu11==11.10.3.66
-nvidia-cuda-cupti-cu11==11.7.101
-nvidia-cuda-nvrtc-cu11==11.7.99
-nvidia-cuda-runtime-cu11==11.7.99
-nvidia-cudnn-cu11==8.5.0.96
-nvidia-cufft-cu11==10.9.0.58
-nvidia-curand-cu11==10.2.10.91
-nvidia-cusolver-cu11==11.4.0.1
-nvidia-cusparse-cu11==11.7.4.91
-nvidia-nccl-cu11==2.14.3
-nvidia-nvtx-cu11==11.7.91
-orjson==3.10.0
+nvidia-cublas-cu12==12.1.3.1
+nvidia-cuda-cupti-cu12==12.1.105
+nvidia-cuda-nvrtc-cu12==12.1.105
+nvidia-cuda-runtime-cu12==12.1.105
+nvidia-cudnn-cu12==8.9.2.26
+nvidia-cufft-cu12==11.0.2.54
+nvidia-curand-cu12==10.3.2.106
+nvidia-cusolver-cu12==11.4.5.107
+nvidia-cusparse-cu12==12.1.0.106
+nvidia-nccl-cu12==2.19.3
+nvidia-nvjitlink-cu12==12.5.40
+nvidia-nvtx-cu12==12.1.105
+orjson==3.10.3
 packaging==24.0
-pandas==2.2.1
-pillow==10.2.0
+pandas==2.2.2
+pillow==10.3.0
 protobuf==3.20.3
 psutil==5.9.8
-pyarrow==15.0.2
+pyarrow==16.0.0
 pyarrow-hotfix==0.6
 pycparser==2.22
-pydantic==2.6.4
-pydantic_core==2.16.3
+pydantic==2.7.2
+pydantic_core==2.18.3
 pydub==0.25.1
 pygltflib==1.16.1
-Pygments==2.17.2
+Pygments==2.18.0
 pyparsing==3.1.2
 python-dateutil==2.9.0.post0
+python-dotenv==1.0.1
 python-multipart==0.0.9
 pytz==2024.1
 PyYAML==6.0.1
-referencing==0.34.0
-regex==2023.12.25
+referencing==0.35.1
+regex==2024.5.15
 requests==2.31.0
 rich==13.7.1
-rpds-py==0.18.0
-ruff==0.3.4
-safetensors==0.4.2
+rpds-py==0.18.1
+ruff==0.4.7
+safetensors==0.4.3
 scipy==1.11.4
 semantic-version==2.10.0
 shellingham==1.5.4
 six==1.16.0
 sniffio==1.3.1
-spaces==0.25.0
-starlette==0.36.3
-sympy==1.12
+spaces==0.28.3
+starlette==0.37.2
+sympy==1.12.1
 tokenizers==0.15.2
 tomlkit==0.12.0
 toolz==0.12.1
-torch==2.0.1
-tqdm==4.66.2
+torch==2.2.0
+tqdm==4.66.4
 transformers==4.36.1
 trimesh==4.0.5
-triton==2.0.0
-typer==0.12.0
-typer-cli==0.12.0
-typer-slim==0.12.0
+triton==2.2.0
+typer==0.12.3
 typing-inspect==0.9.0
-typing_extensions==4.10.0
+typing_extensions==4.11.0
 tzdata==2024.1
+ujson==5.10.0
 urllib3==2.2.1
-uvicorn==0.29.0
+uvicorn==0.30.0
+uvloop==0.19.0
+watchfiles==0.22.0
 websockets==11.0.3
 wrapt==1.16.0
-xformers==0.0.21
+xformers==0.0.24
 xxhash==3.4.1
 yarl==1.9.4
-zipp==3.18.1
+zipp==3.19.1
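
The headline change in this diff is diffusers 0.27.2 → 0.28.0, the first release that ships Marigold as core pipelines, which is what lets the app stop bundling its own pipeline code. A rough sketch of the upgraded usage, assuming the prs-eth/marigold-depth-lcm-v1-0 checkpoint, a placeholder input path, and a CUDA device (see the diffusers Marigold docs for the exact API):

    import torch
    import diffusers

    # Sketch only: load the core Marigold depth pipeline added in diffusers 0.28.0.
    pipe = diffusers.MarigoldDepthPipeline.from_pretrained(
        "prs-eth/marigold-depth-lcm-v1-0", variant="fp16", torch_dtype=torch.float16
    ).to("cuda")

    image = diffusers.utils.load_image("path/to/input.jpg")  # placeholder input
    result = pipe(image)  # result.prediction holds the affine-invariant depth map

    # Colorize and save; visualize_depth is provided by the Marigold image processor.
    colored = pipe.image_processor.visualize_depth(result.prediction)
    colored[0].save("depth_colored.png")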
requirements_min.txt CHANGED
@@ -1,14 +1,14 @@
-gradio==4.21.0
-gradio-imageslider==0.0.18
+gradio>=4.32.1
+gradio-imageslider>=0.0.20
 pygltflib==1.16.1
 trimesh==4.0.5
 imageio
 imageio-ffmpeg
 Pillow
 
-spaces==0.25.0
-accelerate==0.25.0
-diffusers==0.27.2
+spaces
+accelerate
+diffusers>=0.28.0
 matplotlib==3.8.2
 scipy==1.11.4
 torch==2.0.1