aliabd committed
Commit 60f8f27
1 Parent(s): 04ad25b

Upload with huggingface_hub

Files changed (3)
  1. DESCRIPTION.md +1 -0
  2. README.md +1 -1
  3. app.py +0 -8
DESCRIPTION.md ADDED
@@ -0,0 +1 @@
+A demo for predicting the depth of an image and generating a 3D model of it.
README.md CHANGED
@@ -1,7 +1,7 @@
 
 ---
 title: depth_estimation
-emoji: 🤗
+emoji: 🔥
 colorFrom: indigo
 colorTo: indigo
 sdk: gradio
app.py CHANGED
@@ -1,6 +1,3 @@
-# URL: https://huggingface.co/spaces/radames/dpt-depth-estimation-3d-obj
-# DESCRIPTION: A demo for predicting the depth of an image and generating a 3D model of it.
-# imports
 import gradio as gr
 from transformers import DPTFeatureExtractor, DPTForDepthEstimation
 import torch
@@ -10,11 +7,9 @@ import open3d as o3d
 from pathlib import Path
 import os
 
-# load the model
 feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
 model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")
 
-# define the core and helper functions
 def process_image(image_path):
     image_path = Path(image_path)
     image_raw = Image.open(image_path)
@@ -103,12 +98,10 @@ def create_3d_obj(rgb_image, depth_image, image_path, depth=10):
         gltf_path, mesh_crop, write_triangle_uvs=True)
     return gltf_path
 
-# define the title, description and examples
 title = "Demo: zero-shot depth estimation with DPT + 3D Point Cloud"
 description = "This demo is a variation from the original <a href='https://huggingface.co/spaces/nielsr/dpt-depth-estimation' target='_blank'>DPT Demo</a>. It uses the DPT model to predict the depth of an image and then uses 3D Point Cloud to create a 3D object."
 examples = [["examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg"]]
 
-# define an interface with one Image input and 3 outputs: Image, Model3D and File
 iface = gr.Interface(fn=process_image,
                      inputs=[gr.Image(
                          type="filepath", label="Input Image")],
@@ -122,5 +115,4 @@ iface = gr.Interface(fn=process_image,
                      allow_flagging="never",
                      cache_examples=False)
 
-# launch
 iface.launch(debug=True, enable_queue=False)
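
For readers skimming the diff, here is a minimal sketch of the pipeline app.py wraps: DPT depth estimation via transformers, then an Open3D point cloud meshed and written as glTF (the write_triangle_mesh call and the depth=10 default are visible in the hunks above). The helper names estimate_depth and depth_to_gltf, the uint8 depth normalization, the pinhole intrinsics, and the normal-estimation radius are illustrative assumptions, not code from this commit.

```python
import numpy as np
import torch
import open3d as o3d
from PIL import Image
from transformers import DPTFeatureExtractor, DPTForDepthEstimation

feature_extractor = DPTFeatureExtractor.from_pretrained("Intel/dpt-large")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-large")


def estimate_depth(image: Image.Image) -> np.ndarray:
    # Run DPT and resize the predicted depth back to the input resolution.
    inputs = feature_extractor(images=image, return_tensors="pt")
    with torch.no_grad():
        predicted_depth = model(**inputs).predicted_depth
    depth = torch.nn.functional.interpolate(
        predicted_depth.unsqueeze(1),
        size=image.size[::-1],  # PIL size is (width, height); interpolate wants (height, width)
        mode="bicubic",
        align_corners=False,
    ).squeeze().cpu().numpy()
    # Normalize to 8-bit so Open3D can consume it as a depth image (assumed scaling).
    return ((depth - depth.min()) / (depth.max() - depth.min()) * 255).astype("uint8")


def depth_to_gltf(image: Image.Image, depth_map: np.ndarray, gltf_path: str = "mesh.gltf") -> str:
    # Fuse color and depth, back-project to a point cloud, mesh it with Poisson
    # reconstruction (depth=10 mirrors the create_3d_obj default in the diff),
    # and export as glTF, as the write_triangle_mesh call above does.
    w, h = image.size
    rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
        o3d.geometry.Image(np.array(image.convert("RGB"))),
        o3d.geometry.Image(depth_map),
        convert_rgb_to_intensity=False,
    )
    # Assumed pinhole intrinsics; the real app's camera parameters are not shown in this diff.
    intrinsic = o3d.camera.PinholeCameraIntrinsic(w, h, 500.0, 500.0, w / 2, h / 2)
    pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic)
    pcd.estimate_normals(
        search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.01, max_nn=30))
    mesh, _ = o3d.geometry.TriangleMesh.create_from_point_cloud_poisson(pcd, depth=10)
    o3d.io.write_triangle_mesh(gltf_path, mesh, write_triangle_uvs=True)
    return gltf_path


if __name__ == "__main__":
    img = Image.open("examples/1-jonathan-borba-CgWTqYxHEkg-unsplash.jpg")
    print(depth_to_gltf(img, estimate_depth(img)))
```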