multimodalart HF staff committed on
Commit
fdb346e
1 Parent(s): d7f6798

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -5
app.py CHANGED
@@ -24,7 +24,7 @@ CACHE_EXAMPLES = False #torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES"
24
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
25
  USE_TORCH_COMPILE = False
26
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
27
- PREVIEW_IMAGES = True
28
 
29
  dtype = torch.bfloat16
30
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
@@ -86,7 +86,7 @@ def generate(
86
  profile: gr.OAuthProfile | None = None,
87
  ) -> PIL.Image.Image:
88
 
89
- previewer.eval().requires_grad_(False).to(device).to(dtype)
90
  prior_pipeline.to(device)
91
  decoder_pipeline.to(device)
92
 
@@ -102,10 +102,9 @@ def generate(
102
  guidance_scale=prior_guidance_scale,
103
  num_images_per_prompt=num_images_per_prompt,
104
  generator=generator,
105
- callback_on_step_end=callback_prior,
106
- callback_on_step_end_tensor_inputs=['latents']
107
  )
108
- print(prior_output)
109
  if PREVIEW_IMAGES:
110
  for _ in range(len(DEFAULT_STAGE_C_TIMESTEPS)):
111
  r = next(prior_output)
 
24
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
25
  USE_TORCH_COMPILE = False
26
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
27
+ PREVIEW_IMAGES = False
28
 
29
  dtype = torch.bfloat16
30
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
86
  profile: gr.OAuthProfile | None = None,
87
  ) -> PIL.Image.Image:
88
 
89
+ #previewer.eval().requires_grad_(False).to(device).to(dtype)
90
  prior_pipeline.to(device)
91
  decoder_pipeline.to(device)
92
 
 
102
  guidance_scale=prior_guidance_scale,
103
  num_images_per_prompt=num_images_per_prompt,
104
  generator=generator,
105
+ #callback_on_step_end=callback_prior,
106
+ #callback_on_step_end_tensor_inputs=['latents']
107
  )
 
108
  if PREVIEW_IMAGES:
109
  for _ in range(len(DEFAULT_STAGE_C_TIMESTEPS)):
110
  r = next(prior_output)