lemonaddie committed on
Commit 0d89806
1 Parent(s): ed4d1ff

Create app2.py

Files changed (1)
  1. app2.py +190 -0
app2.py ADDED
@@ -0,0 +1,190 @@
import os
import sys

import fire
import gradio as gr
import spaces
import torch
from PIL import Image

sys.path.append("../")
from models.depth_normal_pipeline_clip import DepthNormalEstimationPipeline

# Assumed checkpoint id for the GeoWizard weights; replace with the actual
# Hugging Face hub id or local path if it differs.
CHECKPOINT = "lemonaddie/Geowizard"

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
pipe = DepthNormalEstimationPipeline.from_pretrained(CHECKPOINT)

try:
    import xformers
    pipe.enable_xformers_memory_efficient_attention()
except Exception:
    pass  # run without xformers

pipe = pipe.to(device)


@spaces.GPU
def depth_normal(img,
                 denoising_steps,
                 ensemble_size,
                 processing_res,
                 guidance_scale,
                 domain):
    # img = img.resize((processing_res, processing_res), Image.Resampling.LANCZOS)
    pipe_out = pipe(
        img,
        denoising_steps=denoising_steps,
        ensemble_size=ensemble_size,
        processing_res=processing_res,
        batch_size=0,  # 0 lets the pipeline choose its own batch size
        guidance_scale=guidance_scale,
        domain=domain,
        show_progress_bar=True,
    )
    # Return the colorized depth and normal maps for display.
    return pipe_out.depth_colored, pipe_out.normal_colored


def run_demo():
    custom_theme = gr.themes.Soft(primary_hue="blue").set(
        button_secondary_background_fill="*neutral_100",
        button_secondary_background_fill_hover="*neutral_200")
    custom_css = '''#disp_image {
        text-align: center; /* Horizontally center the content */
    }'''

    _TITLE = 'GeoWizard: Unleashing the Diffusion Priors for 3D Geometry Estimation from a Single Image'
    _DESCRIPTION = '''
    <div>
    Generate consistent depth and normal maps from a single image, with high quality and rich detail.
    <a style="display:inline-block; margin-left: .5em" href='https://github.com/fuxiao0719/GeoWizard/'><img src='https://img.shields.io/github/stars/fuxiao0719/GeoWizard?style=social' /></a>
    </div>
    '''

    with gr.Blocks(title=_TITLE, theme=custom_theme, css=custom_css) as demo:
        with gr.Row():
            with gr.Column(scale=1):
                gr.Markdown('# ' + _TITLE)
                gr.Markdown(_DESCRIPTION)
        with gr.Row(variant='panel'):
            with gr.Column(scale=1):
                input_image = gr.Image(type='pil', image_mode='RGBA', height=320, label='Input image')

                example_folder = os.path.join(os.path.dirname(__file__), "files")
                example_fns = [os.path.join(example_folder, example) for example in os.listdir(example_folder)]
                gr.Examples(
                    examples=example_fns,
                    inputs=[input_image],
                    cache_examples=False,
                    label='Examples (click one of the images below to start)',
                    examples_per_page=30,
                )
            with gr.Column(scale=1):
                with gr.Accordion('Advanced options', open=True):
                    with gr.Column():
                        domain = gr.Radio(
                            [
                                ("Outdoor", "outdoor"),
                                ("Indoor", "indoor"),
                                ("Object", "object"),
                            ],
                            label="Data type (select the one that matches your image)",
                            value="indoor",
                        )
                        guidance_scale = gr.Slider(
                            label="Classifier-free guidance scale",
                            minimum=1,
                            maximum=5,
                            step=1,
                            value=3,
                        )
                        denoising_steps = gr.Slider(
                            label="Number of denoising steps (more steps, better quality)",
                            minimum=1,
                            maximum=50,
                            step=1,
                            value=20,
                        )
                        ensemble_size = gr.Slider(
                            label="Ensemble size (1 is usually enough; larger ensembles improve accuracy)",
                            minimum=1,
                            maximum=15,
                            step=1,
                            value=1,
                        )
                        processing_res = gr.Radio(
                            [
                                ("Native", 0),
                                ("Recommended", 768),
                            ],
                            label="Processing resolution",
                            value=768,
                        )

                run_btn = gr.Button('Generate', variant='primary', interactive=True)
        with gr.Row():
            with gr.Column():
                depth = gr.Image(interactive=False, show_label=False)
            with gr.Column():
                normal = gr.Image(interactive=False, show_label=False)

        run_btn.click(fn=depth_normal,
                      inputs=[input_image, denoising_steps,
                              ensemble_size,
                              processing_res,
                              guidance_scale,
                              domain],
                      outputs=[depth, normal])
    demo.queue().launch(share=True, max_threads=80)


if __name__ == '__main__':
    fire.Fire(run_demo)
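For quick sanity checks, the pipeline can also be invoked once directly, bypassing the Gradio UI. The sketch below reuses the `pipe` object loaded above with the demo's default settings; the input path `files/example.png` is hypothetical, and saving the outputs assumes `depth_colored` and `normal_colored` are PIL images, consistent with how the demo returns them to `gr.Image`.

    # Minimal usage sketch: one direct pipeline call, assuming `pipe` is
    # loaded as in app2.py above. "files/example.png" is a hypothetical path.
    from PIL import Image

    img = Image.open("files/example.png").convert("RGB")
    pipe_out = pipe(
        img,
        denoising_steps=20,     # demo default
        ensemble_size=1,        # demo default
        processing_res=768,     # the demo's "Recommended" resolution
        batch_size=0,           # 0 lets the pipeline pick a batch size
        guidance_scale=3,       # demo default
        domain="indoor",        # or "outdoor" / "object"
        show_progress_bar=True,
    )
    pipe_out.depth_colored.save("depth_colored.png")    # colorized depth map
    pipe_out.normal_colored.save("normal_colored.png")  # colorized normal map

Since the entry point wraps run_demo in fire.Fire, the full demo itself launches with `python app2.py`.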