lemonaddie committed
Commit 19975ba
1 Parent(s): aae9b16

Update app2.py

Files changed (1)
app2.py +6 -12
app2.py CHANGED
@@ -45,14 +45,12 @@ from torchvision.transforms import InterpolationMode
 
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
-stable_diffusion_repo_path = '.'
-vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder='vae')
-scheduler = DDIMScheduler.from_pretrained(stable_diffusion_repo_path, subfolder='scheduler')
-sd_image_variations_diffusers_path = '.'
-image_encoder = CLIPVisionModelWithProjection.from_pretrained(sd_image_variations_diffusers_path, subfolder="image_encoder")
-feature_extractor = CLIPImageProcessor.from_pretrained(sd_image_variations_diffusers_path, subfolder="feature_extractor")
+vae = AutoencoderKL.from_pretrained('.', subfolder='vae')
+scheduler = DDIMScheduler.from_pretrained('.', subfolder='scheduler')
+image_encoder = CLIPVisionModelWithProjection.from_pretrained('.', subfolder="image_encoder")
+feature_extractor = CLIPImageProcessor.from_pretrained('.', subfolder="feature_extractor")
 
-unet = UNet2DConditionModel.from_pretrained('./wocfg/unet_ema')
+unet = UNet2DConditionModel.from_pretrained('./unet')
 
 pipe = DepthNormalEstimationPipeline(vae=vae,
                                      image_encoder=image_encoder,
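For context, the loading block after this commit reduces to the sketch below. It is a minimal sketch, assuming the Space's repo root ('.') holds a standard diffusers layout (vae/, scheduler/, image_encoder/, feature_extractor/, unet/) and that the constructor keywords past image_encoder, which the hunk truncates, are feature_extractor, unet and scheduler.

import torch
from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
# DepthNormalEstimationPipeline is this repo's own class; its import path is omitted here.

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# One from_pretrained per component, all rooted at the Space itself ('.').
vae = AutoencoderKL.from_pretrained('.', subfolder='vae')
scheduler = DDIMScheduler.from_pretrained('.', subfolder='scheduler')
image_encoder = CLIPVisionModelWithProjection.from_pretrained('.', subfolder="image_encoder")
feature_extractor = CLIPImageProcessor.from_pretrained('.', subfolder="feature_extractor")
unet = UNet2DConditionModel.from_pretrained('./unet')

pipe = DepthNormalEstimationPipeline(vae=vae,
                                     image_encoder=image_encoder,
                                     feature_extractor=feature_extractor,  # assumed keyword
                                     unet=unet,                            # assumed keyword
                                     scheduler=scheduler)                  # assumed keyword
pipe = pipe.to(device)  # assumes the class subclasses diffusers' DiffusionPipeline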
@@ -73,7 +71,6 @@ def depth_normal(img,
                  denoising_steps,
                  ensemble_size,
                  processing_res,
-                 #guidance_scale,
                  seed,
                  domain):
 
@@ -86,7 +83,6 @@ def depth_normal(img,
         ensemble_size=ensemble_size,
         processing_res=processing_res,
         batch_size=0,
-        #guidance_scale=guidance_scale,
         domain=domain,
         show_progress_bar=True,
     )
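With guidance_scale dropped from both the signature (previous hunk) and the call (this hunk), the Gradio handler plausibly reads as below. The keyword arguments come straight from the hunk; the seeding line and the output attribute names are assumptions, since the diff never shows them.

def depth_normal(img, denoising_steps, ensemble_size, processing_res, seed, domain):
    torch.manual_seed(int(seed))  # assumption: how the seed input is applied
    pipe_out = pipe(img,
                    denoising_steps=denoising_steps,
                    ensemble_size=ensemble_size,
                    processing_res=processing_res,
                    batch_size=0,
                    domain=domain,
                    show_progress_bar=True,
                    )
    # Hypothetical attribute names; the hunk does not show how the pipeline
    # output is unpacked into the two result images.
    return pipe_out.depth_colored, pipe_out.normal_colored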
@@ -131,7 +127,6 @@ def run_demo():
         gr.Examples(
             examples=example_fns,
             inputs=[input_image],
-            # outputs=[input_image],
             cache_examples=False,
             label='Examples (click one of the images below to start)',
             examples_per_page=30
@@ -162,7 +157,7 @@ def run_demo():
             minimum=1,
             maximum=15,
             step=1,
-            value=1,
+            value=4,
         )
         seed = gr.Number(42, label='Seed. May try different seed for better results.')
 
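The commit's only value change raises a slider default from 1 to 4. The hunk omits the slider's variable and label; the sketch below assumes it is the denoising_steps control from the handler signature, since the 1..15 range fits a step count.

import gradio as gr

denoising_steps = gr.Slider(
    label='Number of denoising steps',  # assumed name and label
    minimum=1,
    maximum=15,
    step=1,
    value=4,  # was 1 before this commit
)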
@@ -188,7 +183,6 @@ def run_demo():
         inputs=[input_image, denoising_steps,
                 ensemble_size,
                 processing_res,
-                #guidance_scale,
                 seed,
                 domain],
         outputs=[depth, normal]
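Putting the pieces together, the event wiring after this commit might look as follows. run_btn and the two output components are assumptions; the hunk shows only the inputs and outputs lists.

import gradio as gr

run_btn = gr.Button('Run')                    # hypothetical button
depth = gr.Image(label='Depth', type='pil')   # hypothetical output components
normal = gr.Image(label='Normal', type='pil')

run_btn.click(fn=depth_normal,
              inputs=[input_image, denoising_steps,
                      ensemble_size,
                      processing_res,
                      seed,
                      domain],
              outputs=[depth, normal])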
 