multimodalart HF staff committed on
Commit
b468df7
1 Parent(s): 132d74a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +1 -1
app.py CHANGED
@@ -20,7 +20,7 @@ if not torch.cuda.is_available():
20
  DESCRIPTION += "\n<p>Running on CPU 🥶</p>"
21
 
22
  MAX_SEED = np.iinfo(np.int32).max
23
- CACHE_EXAMPLES = False #torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") != "0"
24
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
25
  USE_TORCH_COMPILE = False
26
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
 
20
  DESCRIPTION += "\n<p>Running on CPU 🥶</p>"
21
 
22
  MAX_SEED = np.iinfo(np.int32).max
23
+ CACHE_EXAMPLES = "lazy" #torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") != "0"
24
  MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "1536"))
25
  USE_TORCH_COMPILE = False
26
  ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"