LiheYoung committed on
Commit 8a2c153
1 Parent(s): a34c466

Update app.py

Files changed (1)
  1. app.py +7 -6
app.py CHANGED
@@ -1,8 +1,8 @@
 import gradio as gr
 import cv2
 import numpy as np
+import os
 from PIL import Image
-import spaces
 import torch
 import torch.nn.functional as F
 from torchvision.transforms import Compose
@@ -21,7 +21,6 @@ css = """
 #img-display-output {
     max-height: 80vh;
 }
-
 """
 DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
 model = DPT_DINOv2(encoder='vitl', features=256, out_channels=[256, 512, 1024, 1024]).to(DEVICE).eval()
@@ -29,7 +28,6 @@ model.load_state_dict(torch.load('checkpoints/depth_anything_vitl14.pth'))
 
 title = "# Depth Anything"
 description = """Official demo for **Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data**.
-
 Please refer to our [paper](), [project page](https://depth-anything.github.io), or [github](https://github.com/LiheYoung/Depth-Anything) for more details."""
 
 transform = Compose([
@@ -47,7 +45,6 @@ transform = Compose([
 ])
 
 
-@spaces.GPU
 @torch.no_grad()
 def predict_depth(model, image):
     return model(image)
@@ -85,9 +82,13 @@ with gr.Blocks(css=css) as demo:
         return [colored_depth, tmp.name]
 
     submit.click(on_submit, inputs=[input_image], outputs=[depth_image, raw_file])
-    examples = gr.Examples(examples=["examples/flower.png", "examples/roller_coaster.png", "examples/hall.png", "examples/car.png", "examples/person.png"],
-                           inputs=[input_image])
+
+    example_files = os.listdir('examples')
+    example_files.sort()
+    example_files = [os.path.join('examples', filename) for filename in example_files]
+    examples = gr.Examples(examples=example_files, inputs=[input_image])
 
 
 if __name__ == '__main__':
     demo.queue().launch()
+
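
The functional change here swaps the hardcoded five-image example list for a directory scan, so files dropped into examples/ show up in the gallery without further code edits. Below is a minimal, self-contained sketch of that pattern, assuming an examples/ directory of images sits next to the script; the input_image component is a hypothetical stand-in for the one defined in the full app:

import os
import gradio as gr

with gr.Blocks() as demo:
    # Hypothetical stand-in for the app's real input component.
    input_image = gr.Image(label="Input Image")

    # os.listdir returns entries in arbitrary, platform-dependent order,
    # so sort to keep the example gallery stable across runs and hosts.
    example_files = sorted(os.path.join('examples', f) for f in os.listdir('examples'))
    gr.Examples(examples=example_files, inputs=[input_image])

if __name__ == '__main__':
    demo.launch()

The explicit sort mirrors the commit's example_files.sort() and matters for the same reason the comment gives: os.listdir makes no ordering guarantee. Separately, the commit drops the spaces import and the @spaces.GPU decorator, so predict_depth runs directly on whatever DEVICE resolved to rather than requesting a GPU per call through Hugging Face's ZeroGPU mechanism.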