multimodalart committed
Commit e16e0d9
1 Parent(s): e2feb29

Update app.py

Files changed (1)
  1. app.py +2 -2
app.py CHANGED
@@ -21,7 +21,7 @@ if not torch.cuda.is_available():
 MAX_SEED = np.iinfo(np.int32).max
 CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
 MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
-USE_TORCH_COMPILE = False
+USE_TORCH_COMPILE = True
 ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 PREVIEW_IMAGES = False #not working for now
 
@@ -40,7 +40,7 @@ if torch.cuda.is_available():
 
     if USE_TORCH_COMPILE:
         prior_pipeline.prior = torch.compile(prior_pipeline.prior, mode="max-autotune", fullgraph=True)
-        decoder_pipeline.decoder = torch.compile(decoder_pipeline.decoder, mode="max-autotune", fullgraph=True)
+        #decoder_pipeline.decoder = torch.compile(decoder_pipeline.decoder, mode="max-autotune", fullgraph=True)
 
     if PREVIEW_IMAGES:
         pass
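
For context, a minimal sketch of how the toggled flag is consumed. Only the flag value and the two torch.compile lines come from this diff; the pipeline classes, checkpoint names, and loading code are assumptions for illustration (the attribute names prior_pipeline.prior and decoder_pipeline.decoder suggest the Stable Cascade prior/decoder pipelines from diffusers).

# Sketch only: pipeline classes and checkpoints below are assumptions;
# the flag handling and torch.compile calls mirror this commit's diff.
import os
import torch
from diffusers import StableCascadePriorPipeline, StableCascadeDecoderPipeline

USE_TORCH_COMPILE = True  # flipped from False by this commit

if torch.cuda.is_available():
    # Assumed loading code, not part of the diff.
    prior_pipeline = StableCascadePriorPipeline.from_pretrained(
        "stabilityai/stable-cascade-prior", torch_dtype=torch.bfloat16
    ).to("cuda")
    decoder_pipeline = StableCascadeDecoderPipeline.from_pretrained(
        "stabilityai/stable-cascade", torch_dtype=torch.bfloat16
    ).to("cuda")

    if USE_TORCH_COMPILE:
        # Only the prior is compiled; compiling the decoder stays
        # commented out, matching the second hunk above.
        prior_pipeline.prior = torch.compile(
            prior_pipeline.prior, mode="max-autotune", fullgraph=True
        )
        # decoder_pipeline.decoder = torch.compile(
        #     decoder_pipeline.decoder, mode="max-autotune", fullgraph=True
        # )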