pengHTYX committed
Commit 9a8311b
1 Parent(s): 92f9bc3
Files changed (1)
  1. app.py +8 -2
app.py CHANGED
@@ -165,6 +165,9 @@ def prepare_data(single_image, crop_size, cfg):
 scene = 'scene'
 @spaces.GPU
 def run_pipeline(pipeline, cfg, single_image, guidance_scale, steps, seed, crop_size, chk_group=None):
+    pipeline.to(device=f'cuda:{_GPU_ID}')
+    pipeline.unet.enable_xformers_memory_efficient_attention()
+
     global scene
     # pdb.set_trace()
 
@@ -186,6 +189,10 @@ def run_pipeline(pipeline, cfg, single_image, guidance_scale, steps, seed, crop_
     prompt_embeddings = torch.cat([normal_prompt_embeddings, clr_prompt_embeddings], dim=0)
     prompt_embeddings = rearrange(prompt_embeddings, "B Nv N C -> (B Nv) N C")
 
+
+    imgs_in = imgs_in.to(device=f'cuda:{_GPU_ID}', dtype=weight_dtype)
+    prompt_embeddings = prompt_embeddings.to(device=f'cuda:{_GPU_ID}', dtype=weight_dtype)
+
     ic(pipeline.unet.device)
     ic(pipeline.unet.dtype)
 
@@ -304,8 +311,7 @@ def run_demo():
 
     pipeline = load_era3d_pipeline(cfg)
     torch.set_grad_enabled(False)
-    pipeline.to(device=f'cuda:{_GPU_ID}')
-    # pipeline.unet.enable_xformers_memory_efficient_attention()
+
 
     predictor = sam_init()
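
The added and removed lines match the Hugging Face Spaces ZeroGPU pattern: on ZeroGPU hardware a GPU is attached only while a function decorated with @spaces.GPU is running, so the pipeline, image batch, and prompt embeddings are moved to CUDA inside run_pipeline() rather than once at startup in run_demo(). A minimal sketch of that pattern, assuming the standard spaces package behavior (the model and function names below are hypothetical, not from app.py):

import spaces
import torch

# Build the model on the CPU at startup; under ZeroGPU no CUDA device exists yet.
model = torch.nn.Linear(4, 4).half()

@spaces.GPU  # a GPU is attached only while this function executes
def run(x: torch.Tensor) -> torch.Tensor:
    # Move weights and inputs to the GPU inside the decorated function,
    # mirroring the pipeline.to(...) and imgs_in.to(...) calls added above.
    model.to('cuda')
    x = x.to(device='cuda', dtype=torch.float16)
    with torch.no_grad():
        return model(x)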