LiheYoung committed
Commit 17e4368
1 Parent(s): 3ccadf8

Update app.py

Files changed (1)
  1. app.py +31 -31
app.py CHANGED
@@ -3,6 +3,7 @@ import cv2
 import numpy as np
 import os
 from PIL import Image
+import spaces
 import torch
 import torch.nn.functional as F
 from torchvision.transforms import Compose
@@ -24,11 +25,11 @@ css = """
 }
 """
 DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
-model = DepthAnything.from_pretrained('LiheYoung/depth_anything_vitl14').to(DEVICE).eval()
+encoder = 'vitl'  # can also be 'vitb' or 'vits'
+model = DepthAnything.from_pretrained(f"LiheYoung/depth_anything_{encoder}14").to(DEVICE).eval()
 
 title = "# Depth Anything"
 description = """Official demo for **Depth Anything: Unleashing the Power of Large-Scale Unlabeled Data**.
-
 Please refer to our [paper](https://arxiv.org/abs/2401.10891), [project page](https://depth-anything.github.io), or [github](https://github.com/LiheYoung/Depth-Anything) for more details."""
 
 transform = Compose([
@@ -45,54 +46,53 @@ transform = Compose([
     PrepareForNet(),
 ])
 
-margin_width = 50
-
+@spaces.GPU
 @torch.no_grad()
 def predict_depth(model, image):
     return model(image)
 
+
 with gr.Blocks(css=css) as demo:
     gr.Markdown(title)
     gr.Markdown(description)
     gr.Markdown("### Depth Prediction demo")
     gr.Markdown("You can slide the output to compare the depth prediction with input image")
 
-    with gr.Tab("Image Depth Prediction"):
-        with gr.Row():
-            input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
-            depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5)
-        raw_file = gr.File(label="16-bit raw depth (can be considered as disparity)")
-        submit = gr.Button("Submit")
+    with gr.Row():
+        input_image = gr.Image(label="Input Image", type='numpy', elem_id='img-display-input')
+        depth_image_slider = ImageSlider(label="Depth Map with Slider View", elem_id='img-display-output', position=0.5,)
+    raw_file = gr.File(label="16-bit raw depth (can be considered as disparity)")
+    submit = gr.Button("Submit")
 
-        def on_submit(image):
-            original_image = image.copy()
+    def on_submit(image):
+        original_image = image.copy()
 
-            h, w = image.shape[:2]
+        h, w = image.shape[:2]
 
-            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
-            image = transform({'image': image})['image']
-            image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)
+        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) / 255.0
+        image = transform({'image': image})['image']
+        image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)
 
-            depth = predict_depth(model, image)
-            depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
+        depth = predict_depth(model, image)
+        depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]
 
-            raw_depth = Image.fromarray(depth.cpu().numpy().astype('uint16'))
-            tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
-            raw_depth.save(tmp.name)
+        raw_depth = Image.fromarray(depth.cpu().numpy().astype('uint16'))
+        tmp = tempfile.NamedTemporaryFile(suffix='.png', delete=False)
+        raw_depth.save(tmp.name)
 
-            depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
-            depth = depth.cpu().numpy().astype(np.uint8)
-            colored_depth = cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)[:, :, ::-1]
+        depth = (depth - depth.min()) / (depth.max() - depth.min()) * 255.0
+        depth = depth.cpu().numpy().astype(np.uint8)
+        colored_depth = cv2.applyColorMap(depth, cv2.COLORMAP_INFERNO)[:, :, ::-1]
 
-            return [(original_image, colored_depth), tmp.name]
+        return [(original_image, colored_depth), tmp.name]
 
-        submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, raw_file])
+    submit.click(on_submit, inputs=[input_image], outputs=[depth_image_slider, raw_file])
 
-        example_files = os.listdir('examples')
-        example_files.sort()
-        example_files = [os.path.join('examples', filename) for filename in example_files]
-        examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[depth_image_slider, raw_file], fn=on_submit, cache_examples=True)
+    example_files = os.listdir('examples')
+    example_files.sort()
+    example_files = [os.path.join('examples', filename) for filename in example_files]
+    examples = gr.Examples(examples=example_files, inputs=[input_image], outputs=[depth_image_slider, raw_file], fn=on_submit, cache_examples=True)
 
 
 if __name__ == '__main__':
-    demo.queue().launch()
+    demo.queue().launch()
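Two functional changes land here: the checkpoint name is now built from an `encoder` variable (per the comment, `vits`, `vitb`, or `vitl`, filled into `depth_anything_{encoder}14`), and `predict_depth` gains `@spaces.GPU` from the `spaces` package, which on Hugging Face ZeroGPU hardware attaches a GPU only while the decorated call runs. For reference, a minimal sketch of the same preprocessing and inference path outside Gradio, reusing `transform`, `model`, `DEVICE`, and `predict_depth` as defined in the file above (the input path is a hypothetical example):

import cv2
import torch
import torch.nn.functional as F

bgr = cv2.imread('examples/demo.png')                 # any test image; path is an assumption
h, w = bgr.shape[:2]

image = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) / 255.0  # uint8 BGR -> float RGB in [0, 1]
image = transform({'image': image})['image']          # resize + normalize, as in on_submit
image = torch.from_numpy(image).unsqueeze(0).to(DEVICE)

depth = predict_depth(model, image)                   # (1, H', W') relative depth prediction
depth = F.interpolate(depth[None], (h, w), mode='bilinear', align_corners=False)[0, 0]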
 
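The `raw_file` output is the unnormalized prediction cast to `uint16` and saved as a PNG, so its absolute values are model- and image-dependent; per the component label it can be read as disparity, i.e. larger values are closer. A short sketch of loading that file back and rescaling it for display, assuming a downloaded copy named `raw_depth.png`:

import numpy as np
from PIL import Image

raw = np.asarray(Image.open('raw_depth.png'))   # dtype uint16, shape (H, W)
span = max(int(raw.max()) - int(raw.min()), 1)  # guard against a constant depth map
vis = ((raw - raw.min()) / span * 255.0).astype(np.uint8)
Image.fromarray(vis).save('depth_vis.png')      # 8-bit grayscale preview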