developy committed on
Commit a7638ff · verified · 1 Parent(s): f349f08

Update app.py

Files changed (1)
  1. app.py +17 -151
app.py CHANGED
@@ -1,163 +1,29 @@
-import os
-os.system("pip freeze")
-import spaces
-
 import gradio as gr
-import torch as torch
 from diffusers import MarigoldDepthPipeline, DDIMScheduler
-from gradio_dualvision import DualVisionApp
-from huggingface_hub import login
+import torch
 from PIL import Image
 
 CHECKPOINT = "developy/ApDepth"
 
-if "HF_TOKEN_LOGIN" in os.environ:
-    login(token=os.environ["HF_TOKEN_LOGIN"])
-
 device = "cpu"
-dtype = torch.bfloat16 if torch.cuda.is_available() else torch.float32
+dtype = torch.float32
 
 pipe = MarigoldDepthPipeline.from_pretrained(CHECKPOINT)
 pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
 pipe = pipe.to(device=device, dtype=dtype)
-try:
-    import xformers
-    pipe.enable_xformers_memory_efficient_attention()
-except:
-    pass
-
-
-class MarigoldDepthApp(DualVisionApp):
-    DEFAULT_SEED = 2024
-    DEFAULT_ENSEMBLE_SIZE = 1
-    DEFAULT_DENOISE_STEPS = 1
-    DEFAULT_PROCESSING_RES = 768
-
-    def make_header(self):
-        gr.Markdown(
-            """
-            <h2><a href="https://huggingface.co/spaces/prs-eth/marigold" style="color: black;">Marigold Depth Estimation</a></h2>
-            """
-        )
-        with gr.Row(elem_classes="remove-elements"):
-            gr.Markdown(
-                f"""
-                <p align="center">
-                    <a title="Website" href="https://marigoldcomputervision.github.io/" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/badge/%E2%99%A5%20Project%20-Website-blue">
-                    </a>
-                    <a title="diffusers" href="https://huggingface.co/docs/diffusers/using-diffusers/marigold_usage" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/badge/%F0%9F%A7%A8%20Read_diffusers-Tutorial-yellow?labelColor=green">
-                    </a>
-                    <a title="arXiv" href="https://arxiv.org/abs/2505.09358" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/badge/%F0%9F%93%84%20Read%20-Paper-AF3436">
-                    </a>
-                    <a title="Github" href="https://github.com/prs-eth/marigold" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/github/stars/prs-eth/marigold?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars">
-                    </a>
-                    <a title="Image Normals" href="https://huggingface.co/spaces/prs-eth/marigold-normals" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Image%20Normals%20-Demo-yellow" alt="imagedepth">
-                    </a>
-                    <a title="Image Intrinsics" href="https://huggingface.co/spaces/prs-eth/marigold-iid" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Image%20Intrinsics%20-Demo-yellow" alt="imagedepth">
-                    </a>
-                    <a title="LiDAR Depth" href="https://huggingface.co/spaces/prs-eth/marigold-dc" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/badge/%F0%9F%A4%97%20LiDAR%20Depth%20-Demo-yellow" alt="imagedepth">
-                    </a>
-                    <a title="Video Depth" href="https://huggingface.co/spaces/prs-eth/rollingdepth" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Video%20Depth%20-Demo-yellow" alt="videodepth">
-                    </a>
-                    <a title="Depth-to-3D" href="https://huggingface.co/spaces/prs-eth/depth-to-3d-print" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://img.shields.io/badge/%F0%9F%A4%97%20Depth--to--3D%20-Demo-yellow" alt="depthto3d">
-                    </a>
-                    <a title="Social" href="https://twitter.com/antonobukhov1" target="_blank" rel="noopener noreferrer" style="display: inline-block;">
-                        <img src="https://shields.io/twitter/follow/:?label=Subscribe%20for%20updates!" alt="social">
-                    </a>
-                </p>
-                <p align="center" style="margin-top: 0px;">
-                    Upload a photo or select an example below to compute depth maps in real time.
-                    Use the slider to reveal areas of interest.
-                    Use the radio-buttons to switch between modalities.
-                    Check our other demo badges above for new or relocated functionality.
-                </p>
-                """
-            )
-
-    def build_user_components(self):
-        with gr.Column():
-            ensemble_size = gr.Slider(
-                label="Ensemble size",
-                minimum=1,
-                maximum=10,
-                step=1,
-                value=self.DEFAULT_ENSEMBLE_SIZE,
-            )
-            denoise_steps = gr.Slider(
-                label="Number of denoising steps",
-                minimum=1,
-                maximum=20,
-                step=1,
-                value=self.DEFAULT_DENOISE_STEPS,
-            )
-            processing_res = gr.Radio(
-                [
-                    ("Native", 0),
-                    ("Recommended", 768),
-                ],
-                label="Processing resolution",
-                value=self.DEFAULT_PROCESSING_RES,
-            )
-        return {
-            "ensemble_size": ensemble_size,
-            "denoise_steps": denoise_steps,
-            "processing_res": processing_res,
-        }
-
-    def process(self, image_in: Image.Image, **kwargs):
-        ensemble_size = kwargs.get("ensemble_size", self.DEFAULT_ENSEMBLE_SIZE)
-        denoise_steps = kwargs.get("denoise_steps", self.DEFAULT_DENOISE_STEPS)
-        processing_res = kwargs.get("processing_res", self.DEFAULT_PROCESSING_RES)
-        generator = torch.Generator(device=device).manual_seed(self.DEFAULT_SEED)
-
-        pipe_out = pipe(
-            image_in,
-            ensemble_size=ensemble_size,
-            num_inference_steps=denoise_steps,
-            processing_resolution=processing_res,
-            batch_size=1 if processing_res == 0 else 2,
-            output_uncertainty=ensemble_size >= 3,
-            generator=generator,
-        )
-
-        depth_vis = pipe.image_processor.visualize_depth(pipe_out.prediction)[0]
-        depth_16bit = pipe.image_processor.export_depth_to_16bit_png(pipe_out.prediction)[0]
-
-        out_modalities = {
-            "Depth Visualization": depth_vis,
-            "Depth 16-bit": depth_16bit,
-        }
-        if ensemble_size >= 3:
-            uncertainty = pipe.image_processor.visualize_uncertainty(pipe_out.uncertainty)[0]
-            out_modalities["Uncertainty"] = uncertainty
-
-        out_settings = {
-            "ensemble_size": ensemble_size,
-            "denoise_steps": denoise_steps,
-            "processing_res": processing_res,
-        }
-        return out_modalities, out_settings
-
 
-with MarigoldDepthApp(
-    title="ApDepth",
-    examples_path="files",
-    examples_per_page=12,
-    squeeze_canvas=True,
-    spaces_zero_gpu_enabled=True,
-) as demo:
-    demo.queue(
-        api_open=False,
-    ).launch(
-        server_name="0.0.0.0",
-        server_port=7860,
-    )
+def predict(image: Image.Image):
+    out = pipe(image)
+    depth_vis = pipe.image_processor.visualize_depth(out.prediction)[0]
+    return depth_vis
+
+demo = gr.Interface(
+    fn=predict,
+    inputs=gr.Image(type="pil", label="Input Image"),
+    outputs=gr.Image(type="pil", label="Depth Map"),
+    title="ApDepth Demo",
+    description="Monocular Depth Estimation based on Marigold"
+)
+
+if __name__ == "__main__":
+    demo.launch()
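
For reference, the simplified pipeline in the new app.py can also be exercised outside Gradio. Below is a minimal sketch of the same calls, assuming diffusers, torch, and Pillow are installed; the file names input.jpg and depth.png are hypothetical placeholders.

```python
from PIL import Image
from diffusers import MarigoldDepthPipeline, DDIMScheduler
import torch

# Same setup as the updated app.py: CPU inference in float32.
pipe = MarigoldDepthPipeline.from_pretrained("developy/ApDepth")
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")
pipe = pipe.to(device="cpu", dtype=torch.float32)

image = Image.open("input.jpg")  # hypothetical input file
out = pipe(image)

# visualize_depth returns a list of PIL images; keep the first prediction.
depth_vis = pipe.image_processor.visualize_depth(out.prediction)[0]
depth_vis.save("depth.png")  # hypothetical output path
```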