Binlaveloos committed on
Commit
2a330a0
1 Parent(s): 8974d74

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -7
app.py CHANGED
@@ -9,11 +9,14 @@ import os
9
  import spaces
10
  import json
11
 
12
- dpt_beit = pipeline(task = "depth-estimation", model="Intel/dpt-beit-base-384", device=0)
13
- #dpt_beit = pipeline(task = "depth-estimation", model="Intel/dpt-beit-large-512", device=0)
14
 
15
- depth_anything = pipeline(task = "depth-estimation", model="nielsr/depth-anything-small", device=0)
16
- #depth_anything = pipeline(task = "depth-estimation", model="LiheYoung/depth-anything-large-hf", device=0)
 
 
 
17
 
18
  dpt_large = pipeline(task = "depth-estimation", model="intel/dpt-large", device=0)
19
 
@@ -42,6 +45,19 @@ css = """
42
  border: 1px solid #ccc;
43
  }
44
  """
 
 
 
 
 
 
 
 
 
 
 
 
 
45
  with gr.Blocks(css=css) as demo:
46
  gr.HTML("<h1><center>Compare Depth Estimation Models<center><h1>")
47
  gr.Markdown("In this Space, you can compare different depth estimation models: [DPT-Large](https://huggingface.co/Intel/dpt-large), [DPT with BeiT backbone](https://huggingface.co/Intel/dpt-beit-large-512) and the recent [Depth Anything Model small checkpoint](https://huggingface.co/LiheYoung/depth-anything-small-hf). 🤩")
@@ -52,9 +68,9 @@ with gr.Blocks(css=css) as demo:
52
  with gr.Row():
53
  input_img = gr.Image(label="Input Image", type="pil")
54
  with gr.Row():
55
- output_1 = gr.Image(type="pil", label="DPT-Large")
56
- output_2 = gr.Image(type="pil", label="DPT with BeiT Backbone")
57
- output_3 = gr.Image(type="pil", label="Depth Anything")
58
 
59
  gr.Examples([["bee.jpg"], ["cat.png"], ["cats.png"]],
60
  inputs = input_img,
@@ -66,5 +82,19 @@ with gr.Blocks(css=css) as demo:
66
 
67
  input_img.change(infer, [input_img], [output_1, output_2, output_3])
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
 
70
  demo.launch(debug=True, share=True)
 
9
  import spaces
10
  import json
11
 
12
+ from gradio_depth_pred import create_demo as create_depth_pred_demo
13
+ from gradio_im_to_3d import create_demo as create_im_to_3d_demo
14
 
15
+ #dpt_beit = pipeline(task = "depth-estimation", model="Intel/dpt-beit-base-384", device=0)
16
+ dpt_beit = pipeline(task = "depth-estimation", model="Intel/dpt-beit-large-512", device=0)
17
+
18
+ #depth_anything = pipeline(task = "depth-estimation", model="nielsr/depth-anything-small", device=0)
19
+ depth_anything = pipeline(task = "depth-estimation", model="LiheYoung/depth-anything-large-hf", device=0)
20
 
21
  dpt_large = pipeline(task = "depth-estimation", model="intel/dpt-large", device=0)
22
 
 
45
  border: 1px solid #ccc;
46
  }
47
  """
48
+
49
+ css_zoe = """
50
+ #img-display-container {
51
+ max-height: 50vh;
52
+ }
53
+ #img-display-input {
54
+ max-height: 40vh;
55
+ }
56
+ #img-display-output {
57
+ max-height: 40vh;
58
+ }
59
+
60
+ """
61
  with gr.Blocks(css=css) as demo:
62
  gr.HTML("<h1><center>Compare Depth Estimation Models<center><h1>")
63
  gr.Markdown("In this Space, you can compare different depth estimation models: [DPT-Large](https://huggingface.co/Intel/dpt-large), [DPT with BeiT backbone](https://huggingface.co/Intel/dpt-beit-large-512) and the recent [Depth Anything Model small checkpoint](https://huggingface.co/LiheYoung/depth-anything-small-hf). 🤩")
 
68
  with gr.Row():
69
  input_img = gr.Image(label="Input Image", type="pil")
70
  with gr.Row():
71
+ output_1 = gr.Image(type="pil", label="Intel dpt-large")
72
+ output_2 = gr.Image(type="pil", label="DPT with BeiT Backbone, dpt-beit-large-512")
73
+ output_3 = gr.Image(type="pil", label="LiheYoung/depth-anything-large-hf")
74
 
75
  gr.Examples([["bee.jpg"], ["cat.png"], ["cats.png"]],
76
  inputs = input_img,
 
82
 
83
  input_img.change(infer, [input_img], [output_1, output_2, output_3])
84
 
85
+ with gr.Blocks(css=css|css_zoe) as demo2:
86
+ gr.Markdown(title)
87
+ gr.Markdown(description)
88
+ with gr.Tab("Depth Prediction"):
89
+ create_depth_pred_demo(model)
90
+ with gr.Tab("Image to 3D"):
91
+ create_im_to_3d_demo(model)
92
+ with gr.Tab("360 Panorama to 3D"):
93
+ create_pano_to_3d_demo(model)
94
+
95
+ gr.HTML('''<br><br><br><center>You can duplicate this Space to skip the queue:<a href="https://huggingface.co/spaces/shariqfarooq/ZoeDepth?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a><br>
96
+ <p><img src="https://visitor-badge.glitch.me/badge?page_id=shariqfarooq.zoedepth_demo_hf" alt="visitors"></p></center>''')
97
+
98
+
99
 
100
  demo.launch(debug=True, share=True)